diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 000000000..e69de29bb
diff --git a/404.html b/404.html
new file mode 100644
index 000000000..f7869b050
--- /dev/null
+++ b/404.html
@@ -0,0 +1 @@
+ Sherlock

404

Uh oh, you found a ... nothing.

But it's probably not what you were looking for, sorry...

You're on this page because you tried to access a URL that doesn't exist or a page that's moved elsewhere. Now, if you really want, you can try to use the "Search" box above to find some more useful things. But you don't have to, your call.
\ No newline at end of file
diff --git a/CNAME b/CNAME
new file mode 100644
index 000000000..bf2122d60
--- /dev/null
+++ b/CNAME
@@ -0,0 +1 @@
+www.sherlock.stanford.edu
diff --git a/assets/images/banner.png b/assets/images/banner.png
new file mode 100644
index 000000000..1b6717ccc
Binary files /dev/null and b/assets/images/banner.png differ
diff --git a/assets/images/bg_hero.jpg b/assets/images/bg_hero.jpg
new file mode 100644
index 000000000..a0c1473b5
Binary files /dev/null and b/assets/images/bg_hero.jpg differ
diff --git a/assets/images/bg_svc.jpg b/assets/images/bg_svc.jpg
new file mode 100644
index 000000000..9b30031a9
Binary files /dev/null and b/assets/images/bg_svc.jpg differ
diff --git a/assets/images/favicon.png b/assets/images/favicon.png
new file mode 100644
index 000000000..1cf13b9f9
Binary files /dev/null and b/assets/images/favicon.png differ
diff --git a/assets/images/logo.png b/assets/images/logo.png
new file mode 100644
index 000000000..85e0a12a1
Binary files /dev/null and b/assets/images/logo.png differ
diff --git a/assets/images/logo_small.png b/assets/images/logo_small.png
new file mode 100644
index 000000000..203cef64f
Binary files /dev/null and b/assets/images/logo_small.png differ
diff --git a/assets/images/social/docs/advanced-topics/connection.png b/assets/images/social/docs/advanced-topics/connection.png
new file mode 100644
index 000000000..bff844db4
Binary files /dev/null and b/assets/images/social/docs/advanced-topics/connection.png differ
diff --git a/assets/images/social/docs/advanced-topics/job-management.png b/assets/images/social/docs/advanced-topics/job-management.png
new file mode 100644
index 000000000..9a93a3e8c
Binary files /dev/null and b/assets/images/social/docs/advanced-topics/job-management.png differ
diff --git a/assets/images/social/docs/advanced-topics/node-features.png b/assets/images/social/docs/advanced-topics/node-features.png
new file mode 100644
index 000000000..a3ea752e8
Binary files /dev/null and b/assets/images/social/docs/advanced-topics/node-features.png differ
diff --git a/assets/images/social/docs/concepts.png b/assets/images/social/docs/concepts.png
new file mode 100644
index 000000000..22dc249e3
Binary files /dev/null and b/assets/images/social/docs/concepts.png differ
diff --git a/assets/images/social/docs/credits.png b/assets/images/social/docs/credits.png
new file mode 100644
index 000000000..0de152496
Binary files /dev/null and b/assets/images/social/docs/credits.png differ
diff --git a/assets/images/social/docs/getting-started/connecting.png b/assets/images/social/docs/getting-started/connecting.png
new file mode 100644
index 000000000..b6dd9f298
Binary files /dev/null and b/assets/images/social/docs/getting-started/connecting.png differ
diff --git a/assets/images/social/docs/getting-started/index.png b/assets/images/social/docs/getting-started/index.png
new file mode 100644
index 000000000..f5fe176c6
Binary files /dev/null and b/assets/images/social/docs/getting-started/index.png differ
diff --git a/assets/images/social/docs/getting-started/submitting.png b/assets/images/social/docs/getting-started/submitting.png
new file mode 100644
index 000000000..1018e4382
Binary files /dev/null and b/assets/images/social/docs/getting-started/submitting.png differ
diff --git a/assets/images/social/docs/glossary.png b/assets/images/social/docs/glossary.png
new file mode 100644
index 000000000..6d8adff18
Binary files /dev/null and b/assets/images/social/docs/glossary.png differ
diff --git a/assets/images/social/docs/index.png b/assets/images/social/docs/index.png
new file mode 100644
index 000000000..79589cfe1
Binary files /dev/null and b/assets/images/social/docs/index.png differ
diff --git a/assets/images/social/docs/orders.png b/assets/images/social/docs/orders.png
new file mode 100644
index 000000000..5c5519356
Binary files /dev/null and b/assets/images/social/docs/orders.png differ
diff --git a/assets/images/social/docs/software/containers/apptainer.png b/assets/images/social/docs/software/containers/apptainer.png
new file mode 100644
index 000000000..8db200a8b
Binary files /dev/null and b/assets/images/social/docs/software/containers/apptainer.png differ
diff --git a/assets/images/social/docs/software/containers/index.png b/assets/images/social/docs/software/containers/index.png
new file mode 100644
index 000000000..f864adc2e
Binary files /dev/null and b/assets/images/social/docs/software/containers/index.png differ
diff --git a/assets/images/social/docs/software/containers/singularity.png b/assets/images/social/docs/software/containers/singularity.png
new file mode 100644
index 000000000..8db200a8b
Binary files /dev/null and b/assets/images/social/docs/software/containers/singularity.png differ
diff --git a/assets/images/social/docs/software/index.png b/assets/images/social/docs/software/index.png
new file mode 100644
index 000000000..40f133b47
Binary files /dev/null and b/assets/images/social/docs/software/index.png differ
diff --git a/assets/images/social/docs/software/install.png b/assets/images/social/docs/software/install.png
new file mode 100644
index 000000000..92eb97df6
Binary files /dev/null and b/assets/images/social/docs/software/install.png differ
diff --git a/assets/images/social/docs/software/list.png b/assets/images/social/docs/software/list.png
new file mode 100644
index 000000000..85e65c9e0
Binary files /dev/null and b/assets/images/social/docs/software/list.png differ
diff --git a/assets/images/social/docs/software/modules.png b/assets/images/social/docs/software/modules.png
new file mode 100644
index 000000000..31dbeff13
Binary files /dev/null and b/assets/images/social/docs/software/modules.png differ
diff --git a/assets/images/social/docs/software/using/R.png b/assets/images/social/docs/software/using/R.png
new file mode 100644
index 000000000..0fdbcd22c
Binary files /dev/null and b/assets/images/social/docs/software/using/R.png differ
diff --git a/assets/images/social/docs/software/using/anaconda.png b/assets/images/social/docs/software/using/anaconda.png
new file mode 100644
index 000000000..48c827bef
Binary files /dev/null and b/assets/images/social/docs/software/using/anaconda.png differ
diff --git a/assets/images/social/docs/software/using/clustershell.png b/assets/images/social/docs/software/using/clustershell.png
new file mode 100644
index 000000000..1c08d6bb7
Binary files /dev/null and b/assets/images/social/docs/software/using/clustershell.png differ
diff --git a/assets/images/social/docs/software/using/julia.png b/assets/images/social/docs/software/using/julia.png
new file mode 100644
index 000000000..3ed9e5116
Binary files /dev/null and b/assets/images/social/docs/software/using/julia.png differ
diff --git a/assets/images/social/docs/software/using/mariadb.png b/assets/images/social/docs/software/using/mariadb.png
new file mode 100644
index 000000000..01fd958d4
Binary files /dev/null and b/assets/images/social/docs/software/using/mariadb.png differ
diff --git a/assets/images/social/docs/software/using/matlab.png b/assets/images/social/docs/software/using/matlab.png
new file mode 100644
index 000000000..b8d680158
Binary files /dev/null and b/assets/images/social/docs/software/using/matlab.png differ
diff --git a/assets/images/social/docs/software/using/perl.png b/assets/images/social/docs/software/using/perl.png
new file mode 100644
index 000000000..e14e0046c
Binary files /dev/null and b/assets/images/social/docs/software/using/perl.png differ
diff --git a/assets/images/social/docs/software/using/postgresql.png b/assets/images/social/docs/software/using/postgresql.png
new file mode 100644
index 000000000..85cac5f09
Binary files /dev/null and b/assets/images/social/docs/software/using/postgresql.png differ
diff --git a/assets/images/social/docs/software/using/python.png b/assets/images/social/docs/software/using/python.png
new file mode 100644
index 000000000..4b2864e19
Binary files /dev/null and b/assets/images/social/docs/software/using/python.png differ
diff --git a/assets/images/social/docs/software/using/quantum-espresso.png b/assets/images/social/docs/software/using/quantum-espresso.png
new file mode 100644
index 000000000..7f20736e8
Binary files /dev/null and b/assets/images/social/docs/software/using/quantum-espresso.png differ
diff --git a/assets/images/social/docs/software/using/rclone.png b/assets/images/social/docs/software/using/rclone.png
new file mode 100644
index 000000000..9ec597d93
Binary files /dev/null and b/assets/images/social/docs/software/using/rclone.png differ
diff --git a/assets/images/social/docs/software/using/schrodinger.png b/assets/images/social/docs/software/using/schrodinger.png
new file mode 100644
index 000000000..7a6779761
Binary files /dev/null and b/assets/images/social/docs/software/using/schrodinger.png differ
diff --git a/assets/images/social/docs/software/using/spark.png b/assets/images/social/docs/software/using/spark.png
new file mode 100644
index 000000000..bdfc8f9fe
Binary files /dev/null and b/assets/images/social/docs/software/using/spark.png differ
diff --git a/assets/images/social/docs/storage/data-protection.png b/assets/images/social/docs/storage/data-protection.png
new file mode 100644
index 000000000..6ff77fd22
Binary files /dev/null and b/assets/images/social/docs/storage/data-protection.png differ
diff --git a/assets/images/social/docs/storage/data-sharing.png b/assets/images/social/docs/storage/data-sharing.png
new file mode 100644
index 000000000..568ebc3f9
Binary files /dev/null and b/assets/images/social/docs/storage/data-sharing.png differ
diff --git a/assets/images/social/docs/storage/data-transfer.png b/assets/images/social/docs/storage/data-transfer.png
new file mode 100644
index 000000000..8eedf73d1
Binary files /dev/null and b/assets/images/social/docs/storage/data-transfer.png differ
diff --git a/assets/images/social/docs/storage/filesystems.png b/assets/images/social/docs/storage/filesystems.png
new file mode 100644
index 000000000..186fb86e3
Binary files /dev/null and b/assets/images/social/docs/storage/filesystems.png differ
diff --git a/assets/images/social/docs/storage/index.png b/assets/images/social/docs/storage/index.png
new file mode 100644
index 000000000..fe9815c39
Binary files /dev/null and b/assets/images/social/docs/storage/index.png differ
diff --git a/assets/images/social/docs/tags.png b/assets/images/social/docs/tags.png
new file mode 100644
index 000000000..626df2b3a
Binary files /dev/null and b/assets/images/social/docs/tags.png differ
diff --git a/assets/images/social/docs/tech/facts.png b/assets/images/social/docs/tech/facts.png
new file mode 100644
index 000000000..38c97d2f1
Binary files /dev/null and b/assets/images/social/docs/tech/facts.png differ
diff --git a/assets/images/social/docs/tech/index.png b/assets/images/social/docs/tech/index.png
new file mode 100644
index 000000000..c64a2e0b2
Binary files /dev/null and b/assets/images/social/docs/tech/index.png differ
diff --git a/assets/images/social/docs/tech/status.png b/assets/images/social/docs/tech/status.png
new file mode 100644
index 000000000..9e96980d5
Binary files /dev/null and b/assets/images/social/docs/tech/status.png differ
diff --git a/assets/images/social/docs/user-guide/gpu.png b/assets/images/social/docs/user-guide/gpu.png
new file mode 100644
index 000000000..e5b67fcad
Binary files /dev/null and b/assets/images/social/docs/user-guide/gpu.png differ
diff --git a/assets/images/social/docs/user-guide/ondemand.png b/assets/images/social/docs/user-guide/ondemand.png
new file mode 100644
index 000000000..4d824051b
Binary files /dev/null and b/assets/images/social/docs/user-guide/ondemand.png differ
diff --git a/assets/images/social/docs/user-guide/running-jobs.png b/assets/images/social/docs/user-guide/running-jobs.png
new file mode 100644
index 000000000..66570f963
Binary files /dev/null and b/assets/images/social/docs/user-guide/running-jobs.png differ
diff --git a/assets/images/social/docs/user-guide/troubleshoot.png b/assets/images/social/docs/user-guide/troubleshoot.png
new file mode 100644
index 000000000..8a6440e66
Binary files /dev/null and b/assets/images/social/docs/user-guide/troubleshoot.png differ
diff --git a/assets/images/social/index.png b/assets/images/social/index.png
new file mode 100644
index 000000000..6c544d412
Binary files /dev/null and b/assets/images/social/index.png differ
diff --git a/assets/javascripts/bundle.8d004be9.min.js b/assets/javascripts/bundle.8d004be9.min.js
new file mode 100644
index 000000000..20261e640
--- /dev/null
+++ b/assets/javascripts/bundle.8d004be9.min.js
@@ -0,0 +1,3 @@
+[3 lines of minified JavaScript omitted: the Material for MkDocs theme bundle (focus-visible polyfill, ClipboardJS, RxJS, and theme components for search, tooltips, code annotations, tabs, and Mermaid rendering)]
t.pipe(b(r=>O($(!0),$(!1).pipe(ot(2e3))).pipe(m(o=>({message:r,active:o})))))}function Zn(e,t){let r=F(".md-typeset",e);return k(()=>{let o=new T;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),rs(e,t).pipe(S(n=>o.next(n)),A(()=>o.complete()),m(n=>j({ref:e},n)))})}var os=0;function ns(e,t){document.body.append(e);let{width:r}=de(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=vr(t),n=typeof o!="undefined"?Ye(o):$({x:0,y:0}),i=O(Ke(t),nt(t)).pipe(Y());return V([i,n]).pipe(m(([s,a])=>{let{x:c,y:p}=Qe(t),l=de(t),f=t.closest("table");return f&&t.parentElement&&(c+=f.offsetLeft+t.parentElement.offsetLeft,p+=f.offsetTop+t.parentElement.offsetTop),{active:s,offset:{x:c-a.x+l.width/2-r/2,y:p-a.y+l.height+8}}}))}function ei(e){let t=e.title;if(!t.length)return x;let r=`__tooltip_${os++}`,o=Dt(r,"inline"),n=F(".md-typeset",o);return n.innerHTML=t,k(()=>{let i=new T;return i.subscribe({next({offset:s}){o.style.setProperty("--md-tooltip-x",`${s.x}px`),o.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),O(i.pipe(v(({active:s})=>s)),i.pipe(Ae(250),v(({active:s})=>!s))).subscribe({next({active:s}){s?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe($e(16,ge)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(gt(125,ge),v(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?o.style.setProperty("--md-tooltip-0",`${-s}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),ns(o,e).pipe(S(s=>i.next(s)),A(()=>i.complete()),m(s=>j({ref:e},s)))}).pipe(Ze(pe))}function is({viewport$:e}){if(!Q("header.autohide"))return $(!1);let t=e.pipe(m(({offset:{y:n}})=>n),tt(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),Y()),o=Be("search");return V([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),Y(),b(n=>n?r:$(!1)),K(!1))}function ti(e,t){return k(()=>V([Le(e),is(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),Y((r,o)=>r.height===o.height&&r.hidden===o.hidden),X(1))}function ri(e,{header$:t,main$:r}){return k(()=>{let o=new T,n=o.pipe(re(),ae(!0));o.pipe(oe("active"),De(t)).subscribe(([{active:s},{hidden:a}])=>{e.classList.toggle("md-header--shadow",s&&!a),e.hidden=a});let i=fe(M("[title]",e)).pipe(v(()=>Q("content.tooltips")),J(s=>ei(s)));return r.subscribe(o),t.pipe(W(n),m(s=>j({ref:e},s)),Ne(i.pipe(W(n))))})}function as(e,{viewport$:t,header$:r}){return Er(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=de(e);return{active:o>=n}}),oe("active"))}function oi(e,t){return k(()=>{let r=new T;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=ue(".md-content h1");return typeof o=="undefined"?x:as(o,t).pipe(S(n=>r.next(n)),A(()=>r.complete()),m(n=>j({ref:e},n)))})}function ni(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),Y()),n=o.pipe(b(()=>Le(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),oe("bottom"))));return 
V([o,n,t]).pipe(m(([i,{top:s,bottom:a},{offset:{y:c},size:{height:p}}])=>(p=Math.max(0,p-Math.max(0,s-c,i)-Math.max(0,p+c-a)),{offset:s-i,height:p,active:s-i<=c})),Y((i,s)=>i.offset===s.offset&&i.height===s.height&&i.active===s.active))}function ss(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return $(...e).pipe(J(o=>h(o,"change").pipe(m(()=>o))),K(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),X(1))}function ii(e){let t=M("input",e),r=y("meta",{name:"theme-color"});document.head.appendChild(r);let o=y("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Wt("(prefers-color-scheme: light)");return k(()=>{let i=new T;return i.subscribe(s=>{if(document.body.setAttribute("data-md-color-switching",""),s.color.media==="(prefers-color-scheme)"){let a=matchMedia("(prefers-color-scheme: light)"),c=document.querySelector(a.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");s.color.scheme=c.getAttribute("data-md-color-scheme"),s.color.primary=c.getAttribute("data-md-color-primary"),s.color.accent=c.getAttribute("data-md-color-accent")}for(let[a,c]of Object.entries(s.color))document.body.setAttribute(`data-md-color-${a}`,c);for(let a=0;as.key==="Enter"),te(i,(s,a)=>a)).subscribe(({index:s})=>{s=(s+1)%t.length,t[s].click(),t[s].focus()}),i.pipe(m(()=>{let s=Ce("header"),a=window.getComputedStyle(s);return o.content=a.colorScheme,a.backgroundColor.match(/\d+/g).map(c=>(+c).toString(16).padStart(2,"0")).join("")})).subscribe(s=>r.content=`#${s}`),i.pipe(xe(pe)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),ss(t).pipe(W(n.pipe(Pe(1))),vt(),S(s=>i.next(s)),A(()=>i.complete()),m(s=>j({ref:e},s)))})}function ai(e,{progress$:t}){return k(()=>{let r=new T;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(S(o=>r.next({value:o})),A(()=>r.complete()),m(o=>({ref:e,value:o})))})}function si(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function cs(e,t){let r=new Map;for(let o of M("url",e)){let n=F("loc",o),i=[si(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let s of M("[rel=alternate]",o)){let a=s.getAttribute("href");a!=null&&i.push(si(new URL(a),t))}}return r}function kt(e){return xn(new URL("sitemap.xml",e)).pipe(m(t=>cs(t,new URL(e))),ye(()=>$(new Map)),le())}function ci({document$:e}){let t=new Map;e.pipe(b(()=>M("link[rel=alternate]")),m(r=>new URL(r.href)),v(r=>!t.has(r.toString())),J(r=>kt(r).pipe(m(o=>[r,o]),ye(()=>x)))).subscribe(([r,o])=>{t.set(r.toString().replace(/\/$/,""),o)}),h(document.body,"click").pipe(v(r=>!r.metaKey&&!r.ctrlKey),b(r=>{if(r.target instanceof Element){let o=r.target.closest("a");if(o&&!o.target){let n=[...t].find(([f])=>o.href.startsWith(`${f}/`));if(typeof n=="undefined")return x;let[i,s]=n,a=we();if(a.href.startsWith(i))return x;let c=Te(),p=a.href.replace(c.base,"");p=`${i}/${p}`;let l=s.has(p.split("#")[0])?new URL(p,c.base):new URL(i);return r.preventDefault(),$(l)}}return x})).subscribe(r=>at(r,!0))}var so=Gt(io());function ps(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function 
pi({alert$:e}){so.default.isSupported()&&new I(t=>{new so.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||ps(F(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(S(t=>{t.trigger.focus()}),m(()=>Me("clipboard.copied"))).subscribe(e)}function li(e,t){if(!(e.target instanceof Element))return x;let r=e.target.closest("a");if(r===null)return x;if(r.target||e.metaKey||e.ctrlKey)return x;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),$(r)):x}function mi(e){let t=new Map;for(let r of M(":scope > *",e.head))t.set(r.outerHTML,r);return t}function fi(e){for(let t of M("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return $(e)}function ls(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...Q("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=ue(o),i=ue(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=mi(document);for(let[o,n]of mi(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Ce("container");return qe(M("script",r)).pipe(b(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new I(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),x}),re(),ae(document))}function ui({sitemap$:e,location$:t,viewport$:r,progress$:o}){if(location.protocol==="file:")return x;$(document).subscribe(fi);let n=h(document.body,"click").pipe(De(e),b(([a,c])=>li(a,c)),m(({href:a})=>new URL(a)),le()),i=h(window,"popstate").pipe(m(we),le());n.pipe(te(r)).subscribe(([a,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",a)}),O(n,i).subscribe(t);let s=t.pipe(oe("pathname"),b(a=>yr(a,{progress$:o}).pipe(ye(()=>(at(a,!0),x)))),b(fi),b(ls),le());return O(s.pipe(te(t,(a,c)=>c)),s.pipe(b(()=>t),oe("pathname"),b(()=>t),oe("hash")),t.pipe(Y((a,c)=>a.pathname===c.pathname&&a.hash===c.hash),b(()=>n),S(()=>history.back()))).subscribe(a=>{var c,p;history.state!==null||!a.hash?window.scrollTo(0,(p=(c=history.state)==null?void 0:c.y)!=null?p:0):(history.scrollRestoration="auto",bn(a.hash),history.scrollRestoration="manual")}),t.subscribe(()=>{history.scrollRestoration="manual"}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),r.pipe(oe("offset"),Ae(100)).subscribe(({offset:a})=>{history.replaceState(a,"")}),Q("navigation.instant.prefetch")&&O(h(document.body,"mousemove"),h(document.body,"focusin")).pipe(De(e),b(([a,c])=>li(a,c)),Ae(25),Qr(({href:a})=>a),hr(a=>{let c=document.createElement("link");return c.rel="prefetch",c.href=a.toString(),document.head.appendChild(c),h(c,"load").pipe(m(()=>c),Ee(1))})).subscribe(a=>a.remove()),s}var bi=Gt(hi());function vi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,s)=>`${i}${s}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return s=>(0,bi.default)(s).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function zt(e){return e.type===1}function 
Sr(e){return e.type===3}function gi(e,t){let r=On(e);return O($(location.protocol!=="file:"),Be("search")).pipe(Re(o=>o),b(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:Q("search.suggest")}}})),r}function xi({document$:e}){let t=Te(),r=Ge(new URL("../versions.json",t.base)).pipe(ye(()=>x)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:s,aliases:a})=>s===i||a.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),b(n=>h(document.body,"click").pipe(v(i=>!i.metaKey&&!i.ctrlKey),te(o),b(([i,s])=>{if(i.target instanceof Element){let a=i.target.closest("a");if(a&&!a.target&&n.has(a.href)){let c=a.href;return!i.target.closest(".md-version")&&n.get(c)===s?x:(i.preventDefault(),$(c))}}return x}),b(i=>kt(new URL(i)).pipe(m(s=>{let c=we().href.replace(t.base,i);return s.has(c.split("#")[0])?new URL(c):new URL(i)})))))).subscribe(n=>at(n,!0)),V([r,o]).subscribe(([n,i])=>{F(".md-header__topic").appendChild(In(n,i))}),e.pipe(b(()=>o)).subscribe(n=>{var s;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let a=((s=t.version)==null?void 0:s.default)||"latest";Array.isArray(a)||(a=[a]);e:for(let c of a)for(let p of n.aliases.concat(n.version))if(new RegExp(c,"i").test(p)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let a of me("outdated"))a.hidden=!1})}function hs(e,{worker$:t}){let{searchParams:r}=we();r.has("q")&&(it("search",!0),e.value=r.get("q"),e.focus(),Be("search").pipe(Re(i=>!i)).subscribe(()=>{let i=we();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=Ke(e),n=O(t.pipe(Re(zt)),h(e,"keyup"),o).pipe(m(()=>e.value),Y());return V([n,o]).pipe(m(([i,s])=>({value:i,focus:s})),X(1))}function yi(e,{worker$:t}){let r=new T,o=r.pipe(re(),ae(!0));V([t.pipe(Re(zt)),r],(i,s)=>s).pipe(oe("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(oe("focus")).subscribe(({focus:i})=>{i&&it("search",i)}),h(e.form,"reset").pipe(W(o)).subscribe(()=>e.focus());let n=F("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),hs(e,{worker$:t}).pipe(S(i=>r.next(i)),A(()=>r.complete()),m(i=>j({ref:e},i)),X(1))}function Ei(e,{worker$:t,query$:r}){let o=new T,n=mn(e.parentElement).pipe(v(Boolean)),i=e.parentElement,s=F(":scope > :first-child",e),a=F(":scope > :last-child",e);Be("search").subscribe(l=>a.setAttribute("role",l?"list":"presentation")),o.pipe(te(r),Gr(t.pipe(Re(zt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:s.textContent=f.length?Me("search.result.none"):Me("search.result.placeholder");break;case 1:s.textContent=Me("search.result.one");break;default:let u=br(l.length);s.textContent=Me("search.result.other",u)}});let c=o.pipe(S(()=>a.innerHTML=""),b(({items:l})=>O($(...l.slice(0,10)),$(...l.slice(10)).pipe(tt(4),Xr(n),b(([f])=>f)))),m($n),le());return c.subscribe(l=>a.appendChild(l)),c.pipe(J(l=>{let f=ue("details",l);return typeof f=="undefined"?x:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(v(Sr),m(({data:l})=>l)).pipe(S(l=>o.next(l)),A(()=>o.complete()),m(l=>j({ref:e},l)))}function bs(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=we();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function wi(e,t){let r=new T,o=r.pipe(re(),ae(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),bs(e,t).pipe(S(n=>r.next(n)),A(()=>r.complete()),m(n=>j({ref:e},n)))}function Ti(e,{worker$:t,keyboard$:r}){let o=new T,n=Ce("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(xe(pe),m(()=>n.value),Y());return o.pipe(De(i),m(([{suggest:a},c])=>{let p=c.split(/([\s-]+)/);if(a!=null&&a.length&&p[p.length-1]){let l=a[a.length-1];l.startsWith(p[p.length-1])&&(p[p.length-1]=l)}else p.length=0;return p})).subscribe(a=>e.innerHTML=a.join("").replace(/\s/g," ")),r.pipe(v(({mode:a})=>a==="search")).subscribe(a=>{switch(a.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(v(Sr),m(({data:a})=>a)).pipe(S(a=>o.next(a)),A(()=>o.complete()),m(()=>({ref:e})))}function Si(e,{index$:t,keyboard$:r}){let o=Te();try{let n=gi(o.search,t),i=Ce("search-query",e),s=Ce("search-result",e);h(e,"click").pipe(v(({target:c})=>c instanceof Element&&!!c.closest("a"))).subscribe(()=>it("search",!1)),r.pipe(v(({mode:c})=>c==="search")).subscribe(c=>{let p=Ve();switch(c.type){case"Enter":if(p===i){let l=new Map;for(let f of M(":first-child [href]",s)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}c.claim()}break;case"Escape":case"Tab":it("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof p=="undefined")i.focus();else{let l=[i,...M(":not(details) > [href], summary, details[open] [href]",s)],f=Math.max(0,(Math.max(0,l.indexOf(p))+l.length+(c.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}c.claim();break;default:i!==Ve()&&i.focus()}}),r.pipe(v(({mode:c})=>c==="global")).subscribe(c=>{switch(c.type){case"f":case"s":case"/":i.focus(),i.select(),c.claim();break}});let a=yi(i,{worker$:n});return O(a,Ei(s,{worker$:n,query$:a})).pipe(Ne(...me("search-share",e).map(c=>wi(c,{query$:a})),...me("search-suggest",e).map(c=>Ti(c,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,et}}function Oi(e,{index$:t,location$:r}){return V([t,r.pipe(K(we()),v(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>vi(o.config)(n.searchParams.get("h"))),m(o=>{var s;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let a=i.nextNode();a;a=i.nextNode())if((s=a.parentElement)!=null&&s.offsetHeight){let c=a.textContent,p=o(c);p.length>c.length&&n.set(a,p)}for(let[a,c]of n){let{childNodes:p}=y("span",null,c);a.replaceWith(...Array.from(p))}return{ref:e,nodes:n}}))}function vs(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return V([r,t]).pipe(m(([{offset:i,height:s},{offset:{y:a}}])=>(s=s+Math.min(n,Math.max(0,a-i))-n,{height:s,locked:a>=i+n})),Y((i,s)=>i.height===s.height&&i.locked===s.locked))}function co(e,o){var n=o,{header$:t}=n,r=ho(n,["header$"]);let i=F(".md-sidebar__scrollwrap",e),{y:s}=Qe(i);return k(()=>{let a=new T,c=a.pipe(re(),ae(!0)),p=a.pipe($e(0,ge));return p.pipe(te(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*s}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),p.pipe(Re()).subscribe(()=>{for(let l of M(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=de(f);f.scrollTo({top:u-d/2})}}}),fe(M("label[tabindex]",e)).pipe(J(l=>h(l,"click").pipe(xe(pe),m(()=>l),W(c)))).subscribe(l=>{let 
f=F(`[id="${l.htmlFor}"]`);F(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),Q("content.tooltips")&&fe(M("abbr[title]",e)).pipe(J(l=>Je(l,{viewport$})),W(c)).subscribe(),vs(e,r).pipe(S(l=>a.next(l)),A(()=>a.complete()),m(l=>j({ref:e},l)))})}function Li(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return bt(Ge(`${r}/releases/latest`).pipe(ye(()=>x),m(o=>({version:o.tag_name})),rt({})),Ge(r).pipe(ye(()=>x),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),rt({}))).pipe(m(([o,n])=>j(j({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ge(r).pipe(m(o=>({repositories:o.public_repos})),rt({}))}}function Mi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ge(r).pipe(ye(()=>x),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),rt({}))}function _i(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return Li(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return Mi(r,o)}return x}var gs;function xs(e){return gs||(gs=k(()=>{let t=__md_get("__source",sessionStorage);if(t)return $(t);if(me("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return x}return _i(e.href).pipe(S(o=>__md_set("__source",o,sessionStorage)))}).pipe(ye(()=>x),v(t=>Object.keys(t).length>0),m(t=>({facts:t})),X(1)))}function Ai(e){let t=F(":scope > :last-child",e);return k(()=>{let r=new T;return r.subscribe(({facts:o})=>{t.appendChild(Rn(o)),t.classList.add("md-source__repository--active")}),xs(e).pipe(S(o=>r.next(o)),A(()=>r.complete()),m(o=>j({ref:e},o)))})}function ys(e,{viewport$:t,header$:r}){return Le(document.body).pipe(b(()=>Er(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),oe("hidden"))}function Ci(e,t){return k(()=>{let r=new T;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(Q("navigation.tabs.sticky")?$({hidden:!1}):ys(e,t)).pipe(S(o=>r.next(o)),A(()=>r.complete()),m(o=>j({ref:e},o)))})}function Es(e,{viewport$:t,header$:r}){let o=new Map,n=M(".md-nav__link",e);for(let a of n){let c=decodeURIComponent(a.hash.substring(1)),p=ue(`[id="${c}"]`);typeof p!="undefined"&&o.set(a,p)}let i=r.pipe(oe("height"),m(({height:a})=>{let c=Ce("main"),p=F(":scope > :first-child",c);return a+.8*(p.offsetTop-c.offsetTop)}),le());return Le(document.body).pipe(oe("height"),b(a=>k(()=>{let c=[];return $([...o].reduce((p,[l,f])=>{for(;c.length&&o.get(c[c.length-1]).tagName>=f.tagName;)c.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return p.set([...c=[...c,l]].reverse(),u)},new Map))}).pipe(m(c=>new Map([...c].sort(([,p],[,l])=>p-l))),De(i),b(([c,p])=>t.pipe(Ut(([l,f],{offset:{y:u},size:d})=>{let g=u+d.height>=Math.floor(a.height);for(;f.length;){let[,L]=f[0];if(L-p=u&&!g)f=[l.pop(),...f];else break}return[l,f]},[[],[...c]]),Y((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([a,c])=>({prev:a.map(([p])=>p),next:c.map(([p])=>p)})),K({prev:[],next:[]}),tt(2,1),m(([a,c])=>a.prev.length{let i=new T,s=i.pipe(re(),ae(!0));if(i.subscribe(({prev:a,next:c})=>{for(let[p]of c)p.classList.remove("md-nav__link--passed"),p.classList.remove("md-nav__link--active");for(let[p,[l]]of a.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",p===a.length-1)}),Q("toc.follow")){let 
a=O(t.pipe(Ae(1),m(()=>{})),t.pipe(Ae(250),m(()=>"smooth")));i.pipe(v(({prev:c})=>c.length>0),De(o.pipe(xe(pe))),te(a)).subscribe(([[{prev:c}],p])=>{let[l]=c[c.length-1];if(l.offsetHeight){let f=vr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=de(f);f.scrollTo({top:u-d/2,behavior:p})}}})}return Q("navigation.tracking")&&t.pipe(W(s),oe("offset"),Ae(250),Pe(1),W(n.pipe(Pe(1))),vt({delay:250}),te(i)).subscribe(([,{prev:a}])=>{let c=we(),p=a[a.length-1];if(p&&p.length){let[l]=p,{hash:f}=new URL(l.href);c.hash!==f&&(c.hash=f,history.replaceState({},"",`${c}`))}else c.hash="",history.replaceState({},"",`${c}`)}),Es(e,{viewport$:t,header$:r}).pipe(S(a=>i.next(a)),A(()=>i.complete()),m(a=>j({ref:e},a)))})}function ws(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:s}})=>s),tt(2,1),m(([s,a])=>s>a&&a>0),Y()),i=r.pipe(m(({active:s})=>s));return V([i,n]).pipe(m(([s,a])=>!(s&&a)),Y(),W(o.pipe(Pe(1))),ae(!0),vt({delay:250}),m(s=>({hidden:s})))}function ki(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new T,s=i.pipe(re(),ae(!0));return i.subscribe({next({hidden:a}){e.hidden=a,a?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(s),oe("height")).subscribe(({height:a})=>{e.style.top=`${a+16}px`}),h(e,"click").subscribe(a=>{a.preventDefault(),window.scrollTo({top:0})}),ws(e,{viewport$:t,main$:o,target$:n}).pipe(S(a=>i.next(a)),A(()=>i.complete()),m(a=>j({ref:e},a)))}function $i({document$:e,viewport$:t}){e.pipe(b(()=>M(".md-ellipsis")),J(r=>lt(r).pipe(W(e.pipe(Pe(1))),v(o=>o),m(()=>r),Ee(1))),v(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,Je(n,{viewport$:t}).pipe(W(e.pipe(Pe(1))),A(()=>n.removeAttribute("title")))})).subscribe(),e.pipe(b(()=>M(".md-status")),J(r=>Je(r,{viewport$:t}))).subscribe()}function Ri({document$:e,tablet$:t}){e.pipe(b(()=>M(".md-toggle--indeterminate")),S(r=>{r.indeterminate=!0,r.checked=!1}),J(r=>h(r,"change").pipe(Jr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),te(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function Ts(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Pi({document$:e}){e.pipe(b(()=>M("[data-md-scrollfix]")),S(t=>t.removeAttribute("data-md-scrollfix")),v(Ts),J(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Ii({viewport$:e,tablet$:t}){V([Be("search"),t]).pipe(m(([r,o])=>r&&!o),b(r=>$(r).pipe(ot(r?400:100))),te(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof 
o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function Ss(){return location.protocol==="file:"?At(`${new URL("search/search_index.js",Or.base)}`).pipe(m(()=>__index),X(1)):Ge(new URL("search/search_index.json",Or.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var st=on(),Kt=dn(),$t=vn(Kt),po=un(),He=Sn(),Lr=Wt("(min-width: 960px)"),ji=Wt("(min-width: 1220px)"),Ui=gn(),Or=Te(),Wi=document.forms.namedItem("search")?Ss():et,lo=new T;pi({alert$:lo});ci({document$:st});var mo=new T,Di=kt(Or.base);Q("navigation.instant")&&ui({sitemap$:Di,location$:Kt,viewport$:He,progress$:mo}).subscribe(st);var Fi;((Fi=Or.version)==null?void 0:Fi.provider)==="mike"&&xi({document$:st});O(Kt,$t).pipe(ot(125)).subscribe(()=>{it("drawer",!1),it("search",!1)});po.pipe(v(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=ue("link[rel=prev]");typeof t!="undefined"&&at(t);break;case"n":case".":let r=ue("link[rel=next]");typeof r!="undefined"&&at(r);break;case"Enter":let o=Ve();o instanceof HTMLLabelElement&&o.click()}});$i({viewport$:He,document$:st});Ri({document$:st,tablet$:Lr});Pi({document$:st});Ii({viewport$:He,tablet$:Lr});var mt=ti(Ce("header"),{viewport$:He}),qt=st.pipe(m(()=>Ce("main")),b(e=>ni(e,{viewport$:He,header$:mt})),X(1)),Os=O(...me("consent").map(e=>Mn(e,{target$:$t})),...me("dialog").map(e=>Zn(e,{alert$:lo})),...me("header").map(e=>ri(e,{viewport$:He,header$:mt,main$:qt})),...me("palette").map(e=>ii(e)),...me("progress").map(e=>ai(e,{progress$:mo})),...me("search").map(e=>Si(e,{index$:Wi,keyboard$:po})),...me("source").map(e=>Ai(e))),Ls=k(()=>O(...me("announce").map(e=>Ln(e)),...me("content").map(e=>Xn(e,{sitemap$:Di,viewport$:He,target$:$t,print$:Ui})),...me("content").map(e=>Q("search.highlight")?Oi(e,{index$:Wi,location$:Kt}):x),...me("header-title").map(e=>oi(e,{viewport$:He,header$:mt})),...me("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?eo(ji,()=>co(e,{viewport$:He,header$:mt,main$:qt})):eo(Lr,()=>co(e,{viewport$:He,header$:mt,main$:qt}))),...me("tabs").map(e=>Ci(e,{viewport$:He,header$:mt})),...me("toc").map(e=>Hi(e,{viewport$:He,header$:mt,main$:qt,target$:$t})),...me("top").map(e=>ki(e,{viewport$:He,header$:mt,main$:qt,target$:$t})))),Ni=st.pipe(b(()=>Ls),Ne(Os),X(1));Ni.subscribe();window.document$=st;window.location$=Kt;window.target$=$t;window.keyboard$=po;window.viewport$=He;window.tablet$=Lr;window.screen$=ji;window.print$=Ui;window.alert$=lo;window.progress$=mo;window.component$=Ni;})(); diff --git a/assets/javascripts/lunr/min/lunr.ar.min.js b/assets/javascripts/lunr/min/lunr.ar.min.js new file mode 100644 index 000000000..9b06c26c1 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.ar.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.ar=function(){this.pipeline.reset(),this.pipeline.add(e.ar.trimmer,e.ar.stopWordFilter,e.ar.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ar.stemmer))},e.ar.wordCharacters="ء-ٛٱـ",e.ar.trimmer=e.trimmerSupport.generateTrimmer(e.ar.wordCharacters),e.Pipeline.registerFunction(e.ar.trimmer,"trimmer-ar"),e.ar.stemmer=function(){var e=this;return e.result=!1,e.preRemoved=!1,e.sufRemoved=!1,e.pre={pre1:"ف ك ب و س ل ن ا ي ت",pre2:"ال لل",pre3:"بال وال فال تال كال ولل",pre4:"فبال كبال وبال وكال"},e.suf={suf1:"ه ك ت ن ا ي",suf2:"نك نه ها وك يا اه ون ين تن تم نا وا ان كم كن ني نن ما هم هن تك ته ات يه",suf3:"تين كهم نيه نهم ونه وها يهم ونا ونك وني وهم تكم تنا تها تني تهم كما كها ناه نكم هنا تان يها",suf4:"كموه ناها ونني ونهم تكما تموه تكاه كماه ناكم ناهم نيها وننا"},e.patterns=JSON.parse('{"pt43":[{"pt":[{"c":"ا","l":1}]},{"pt":[{"c":"ا,ت,ن,ي","l":0}],"mPt":[{"c":"ف","l":0,"m":1},{"c":"ع","l":1,"m":2},{"c":"ل","l":2,"m":3}]},{"pt":[{"c":"و","l":2}],"mPt":[{"c":"ف","l":0,"m":0},{"c":"ع","l":1,"m":1},{"c":"ل","l":2,"m":3}]},{"pt":[{"c":"ا","l":2}]},{"pt":[{"c":"ي","l":2}],"mPt":[{"c":"ف","l":0,"m":0},{"c":"ع","l":1,"m":1},{"c":"ا","l":2},{"c":"ل","l":3,"m":3}]},{"pt":[{"c":"م","l":0}]}],"pt53":[{"pt":[{"c":"ت","l":0},{"c":"ا","l":2}]},{"pt":[{"c":"ا,ن,ت,ي","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":0},{"c":"ا","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":3},{"c":"ل","l":3,"m":4},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":0},{"c":"ا","l":3}],"mPt":[{"c":"ف","l":0,"m":1},{"c":"ع","l":1,"m":2},{"c":"ل","l":2,"m":4}]},{"pt":[{"c":"ا","l":3},{"c":"ن","l":4}]},{"pt":[{"c":"ت","l":0},{"c":"ي","l":3}]},{"pt":[{"c":"م","l":0},{"c":"و","l":3}]},{"pt":[{"c":"ا","l":1},{"c":"و","l":3}]},{"pt":[{"c":"و","l":1},{"c":"ا","l":2}]},{"pt":[{"c":"م","l":0},{"c":"ا","l":3}]},{"pt":[{"c":"م","l":0},{"c":"ي","l":3}]},{"pt":[{"c":"ا","l":2},{"c":"ن","l":3}]},{"pt":[{"c":"م","l":0},{"c":"ن","l":1}],"mPt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ف","l":2,"m":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"م","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"م","l":0},{"c":"ا","l":2}]},{"pt":[{"c":"م","l":1},{"c":"ا","l":3}]},{"pt":[{"c":"ي,ت,ا,ن","l":0},{"c":"ت","l":1}],"mPt":[{"c":"ف","l":0,"m":2},{"c":"ع","l":1,"m":3},{"c":"ا","l":2},{"c":"ل","l":3,"m":4}]},{"pt":[{"c":"ت,ي,ا,ن","l":0},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ت","l":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":2},{"c":"ي","l":3}]},{"pt":[{"c":"ا,ي,ت,ن","l":0},{"c":"ن","l":1}],"mPt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ف","l":2,"m":2},{"c":"ع","l":3,"m":3},{"c":"ا","l":4},{"c":"ل","l":5,"m":4}]},{"pt":[{"c":"ا","l":3},{"c":"ء","l":4}]}],"pt63":[{"pt":[{"c":"ا","l":0},{"c":"ت","l":2},{"c":"ا","l":4}]},{"pt":[{"c":"ا,ت,ن,ي","l":0},{"c":"س","l":1},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ف","l":3,"m":3},{"c":"ع","l":4,"m":4},{"c":"ا","l":5},{"c":"ل","l":6,"m":5}]},{"pt":[{"c":"ا,ن,ت,ي","l":0},{"c":"و","l":3}]},{"pt":[{"c":"م","l":0},{"c":"س","l":1},{"c":"ت","l":2}],"mPt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ف","l":3,"m":3},{"c":"ع","l
":4,"m":4},{"c":"ا","l":5},{"c":"ل","l":6,"m":5}]},{"pt":[{"c":"ي","l":1},{"c":"ي","l":3},{"c":"ا","l":4},{"c":"ء","l":5}]},{"pt":[{"c":"ا","l":0},{"c":"ن","l":1},{"c":"ا","l":4}]}],"pt54":[{"pt":[{"c":"ت","l":0}]},{"pt":[{"c":"ا,ي,ت,ن","l":0}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":2},{"c":"ل","l":3,"m":3},{"c":"ر","l":4,"m":4},{"c":"ا","l":5},{"c":"ر","l":6,"m":4}]},{"pt":[{"c":"م","l":0}],"mPt":[{"c":"ا","l":0},{"c":"ف","l":1,"m":1},{"c":"ع","l":2,"m":2},{"c":"ل","l":3,"m":3},{"c":"ر","l":4,"m":4},{"c":"ا","l":5},{"c":"ر","l":6,"m":4}]},{"pt":[{"c":"ا","l":2}]},{"pt":[{"c":"ا","l":0},{"c":"ن","l":2}]}],"pt64":[{"pt":[{"c":"ا","l":0},{"c":"ا","l":4}]},{"pt":[{"c":"م","l":0},{"c":"ت","l":1}]}],"pt73":[{"pt":[{"c":"ا","l":0},{"c":"س","l":1},{"c":"ت","l":2},{"c":"ا","l":5}]}],"pt75":[{"pt":[{"c":"ا","l":0},{"c":"ا","l":5}]}]}'),e.execArray=["cleanWord","removeDiacritics","cleanAlef","removeStopWords","normalizeHamzaAndAlef","removeStartWaw","removePre432","removeEndTaa","wordCheck"],e.stem=function(){var r=0;for(e.result=!1,e.preRemoved=!1,e.sufRemoved=!1;r=0)return!0},e.normalizeHamzaAndAlef=function(){return e.word=e.word.replace("ؤ","ء"),e.word=e.word.replace("ئ","ء"),e.word=e.word.replace(/([\u0627])\1+/gi,"ا"),!1},e.removeEndTaa=function(){return!(e.word.length>2)||(e.word=e.word.replace(/[\u0627]$/,""),e.word=e.word.replace("ة",""),!1)},e.removeStartWaw=function(){return e.word.length>3&&"و"==e.word[0]&&"و"==e.word[1]&&(e.word=e.word.slice(1)),!1},e.removePre432=function(){var r=e.word;if(e.word.length>=7){var t=new RegExp("^("+e.pre.pre4.split(" ").join("|")+")");e.word=e.word.replace(t,"")}if(e.word==r&&e.word.length>=6){var c=new RegExp("^("+e.pre.pre3.split(" ").join("|")+")");e.word=e.word.replace(c,"")}if(e.word==r&&e.word.length>=5){var l=new RegExp("^("+e.pre.pre2.split(" ").join("|")+")");e.word=e.word.replace(l,"")}return r!=e.word&&(e.preRemoved=!0),!1},e.patternCheck=function(r){for(var t=0;t3){var t=new RegExp("^("+e.pre.pre1.split(" ").join("|")+")");e.word=e.word.replace(t,"")}return r!=e.word&&(e.preRemoved=!0),!1},e.removeSuf1=function(){var r=e.word;if(0==e.sufRemoved&&e.word.length>3){var t=new RegExp("("+e.suf.suf1.split(" ").join("|")+")$");e.word=e.word.replace(t,"")}return r!=e.word&&(e.sufRemoved=!0),!1},e.removeSuf432=function(){var r=e.word;if(e.word.length>=6){var t=new RegExp("("+e.suf.suf4.split(" ").join("|")+")$");e.word=e.word.replace(t,"")}if(e.word==r&&e.word.length>=5){var c=new RegExp("("+e.suf.suf3.split(" ").join("|")+")$");e.word=e.word.replace(c,"")}if(e.word==r&&e.word.length>=4){var l=new RegExp("("+e.suf.suf2.split(" ").join("|")+")$");e.word=e.word.replace(l,"")}return r!=e.word&&(e.sufRemoved=!0),!1},e.wordCheck=function(){for(var r=(e.word,[e.removeSuf432,e.removeSuf1,e.removePre1]),t=0,c=!1;e.word.length>=7&&!e.result&&t=f.limit)return;f.cursor++}for(;!f.out_grouping(w,97,248);){if(f.cursor>=f.limit)return;f.cursor++}d=f.cursor,d=d&&(r=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,e=f.find_among_b(c,32),f.limit_backward=r,e))switch(f.bra=f.cursor,e){case 1:f.slice_del();break;case 2:f.in_grouping_b(p,97,229)&&f.slice_del()}}function t(){var e,r=f.limit-f.cursor;f.cursor>=d&&(e=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,f.find_among_b(l,4)?(f.bra=f.cursor,f.limit_backward=e,f.cursor=f.limit-r,f.cursor>f.limit_backward&&(f.cursor--,f.bra=f.cursor,f.slice_del())):f.limit_backward=e)}function s(){var 
e,r,i,n=f.limit-f.cursor;if(f.ket=f.cursor,f.eq_s_b(2,"st")&&(f.bra=f.cursor,f.eq_s_b(2,"ig")&&f.slice_del()),f.cursor=f.limit-n,f.cursor>=d&&(r=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,e=f.find_among_b(m,5),f.limit_backward=r,e))switch(f.bra=f.cursor,e){case 1:f.slice_del(),i=f.limit-f.cursor,t(),f.cursor=f.limit-i;break;case 2:f.slice_from("løs")}}function o(){var e;f.cursor>=d&&(e=f.limit_backward,f.limit_backward=d,f.ket=f.cursor,f.out_grouping_b(w,97,248)?(f.bra=f.cursor,u=f.slice_to(u),f.limit_backward=e,f.eq_v_b(u)&&f.slice_del()):f.limit_backward=e)}var a,d,u,c=[new r("hed",-1,1),new r("ethed",0,1),new r("ered",-1,1),new r("e",-1,1),new r("erede",3,1),new r("ende",3,1),new r("erende",5,1),new r("ene",3,1),new r("erne",3,1),new r("ere",3,1),new r("en",-1,1),new r("heden",10,1),new r("eren",10,1),new r("er",-1,1),new r("heder",13,1),new r("erer",13,1),new r("s",-1,2),new r("heds",16,1),new r("es",16,1),new r("endes",18,1),new r("erendes",19,1),new r("enes",18,1),new r("ernes",18,1),new r("eres",18,1),new r("ens",16,1),new r("hedens",24,1),new r("erens",24,1),new r("ers",16,1),new r("ets",16,1),new r("erets",28,1),new r("et",-1,1),new r("eret",30,1)],l=[new r("gd",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1)],m=[new r("ig",-1,1),new r("lig",0,1),new r("elig",1,1),new r("els",-1,1),new r("løst",-1,2)],w=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],p=[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16],f=new i;this.setCurrent=function(e){f.setCurrent(e)},this.getCurrent=function(){return f.getCurrent()},this.stem=function(){var r=f.cursor;return e(),f.limit_backward=r,f.cursor=f.limit,n(),f.cursor=f.limit,t(),f.cursor=f.limit,s(),f.cursor=f.limit,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.da.stemmer,"stemmer-da"),e.da.stopWordFilter=e.generateStopWordFilter("ad af alle alt anden at blev blive bliver da de dem den denne der deres det dette dig din disse dog du efter eller en end er et for fra ham han hans har havde have hende hendes her hos hun hvad hvis hvor i ikke ind jeg jer jo kunne man mange med meget men mig min mine mit mod ned noget nogle nu når og også om op os over på selv sig sin sine sit skal skulle som sådan thi til ud under var vi vil ville vor være været".split(" ")),e.Pipeline.registerFunction(e.da.stopWordFilter,"stopWordFilter-da")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.de.min.js b/assets/javascripts/lunr/min/lunr.de.min.js new file mode 100644 index 000000000..f3b5c108c --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.de.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `German` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.de=function(){this.pipeline.reset(),this.pipeline.add(e.de.trimmer,e.de.stopWordFilter,e.de.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.de.stemmer))},e.de.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.de.trimmer=e.trimmerSupport.generateTrimmer(e.de.wordCharacters),e.Pipeline.registerFunction(e.de.trimmer,"trimmer-de"),e.de.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,n){return!(!v.eq_s(1,e)||(v.ket=v.cursor,!v.in_grouping(p,97,252)))&&(v.slice_from(r),v.cursor=n,!0)}function i(){for(var r,n,i,s,t=v.cursor;;)if(r=v.cursor,v.bra=r,v.eq_s(1,"ß"))v.ket=v.cursor,v.slice_from("ss");else{if(r>=v.limit)break;v.cursor=r+1}for(v.cursor=t;;)for(n=v.cursor;;){if(i=v.cursor,v.in_grouping(p,97,252)){if(s=v.cursor,v.bra=s,e("u","U",i))break;if(v.cursor=s,e("y","Y",i))break}if(i>=v.limit)return void(v.cursor=n);v.cursor=i+1}}function s(){for(;!v.in_grouping(p,97,252);){if(v.cursor>=v.limit)return!0;v.cursor++}for(;!v.out_grouping(p,97,252);){if(v.cursor>=v.limit)return!0;v.cursor++}return!1}function t(){m=v.limit,l=m;var e=v.cursor+3;0<=e&&e<=v.limit&&(d=e,s()||(m=v.cursor,m=v.limit)return;v.cursor++}}}function c(){return m<=v.cursor}function u(){return l<=v.cursor}function a(){var e,r,n,i,s=v.limit-v.cursor;if(v.ket=v.cursor,(e=v.find_among_b(w,7))&&(v.bra=v.cursor,c()))switch(e){case 1:v.slice_del();break;case 2:v.slice_del(),v.ket=v.cursor,v.eq_s_b(1,"s")&&(v.bra=v.cursor,v.eq_s_b(3,"nis")&&v.slice_del());break;case 3:v.in_grouping_b(g,98,116)&&v.slice_del()}if(v.cursor=v.limit-s,v.ket=v.cursor,(e=v.find_among_b(f,4))&&(v.bra=v.cursor,c()))switch(e){case 1:v.slice_del();break;case 2:if(v.in_grouping_b(k,98,116)){var t=v.cursor-3;v.limit_backward<=t&&t<=v.limit&&(v.cursor=t,v.slice_del())}}if(v.cursor=v.limit-s,v.ket=v.cursor,(e=v.find_among_b(_,8))&&(v.bra=v.cursor,u()))switch(e){case 1:v.slice_del(),v.ket=v.cursor,v.eq_s_b(2,"ig")&&(v.bra=v.cursor,r=v.limit-v.cursor,v.eq_s_b(1,"e")||(v.cursor=v.limit-r,u()&&v.slice_del()));break;case 2:n=v.limit-v.cursor,v.eq_s_b(1,"e")||(v.cursor=v.limit-n,v.slice_del());break;case 3:if(v.slice_del(),v.ket=v.cursor,i=v.limit-v.cursor,!v.eq_s_b(2,"er")&&(v.cursor=v.limit-i,!v.eq_s_b(2,"en")))break;v.bra=v.cursor,c()&&v.slice_del();break;case 4:v.slice_del(),v.ket=v.cursor,e=v.find_among_b(b,2),e&&(v.bra=v.cursor,u()&&1==e&&v.slice_del())}}var d,l,m,h=[new r("",-1,6),new r("U",0,2),new r("Y",0,1),new r("ä",0,3),new r("ö",0,4),new r("ü",0,5)],w=[new r("e",-1,2),new r("em",-1,1),new r("en",-1,2),new r("ern",-1,1),new r("er",-1,1),new r("s",-1,3),new r("es",5,2)],f=[new r("en",-1,1),new r("er",-1,1),new r("st",-1,2),new r("est",2,1)],b=[new r("ig",-1,1),new r("lich",-1,1)],_=[new r("end",-1,1),new r("ig",-1,2),new r("ung",-1,1),new r("lich",-1,3),new r("isch",-1,2),new r("ik",-1,2),new r("heit",-1,3),new r("keit",-1,4)],p=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32,8],g=[117,30,5],k=[117,30,4],v=new n;this.setCurrent=function(e){v.setCurrent(e)},this.getCurrent=function(){return v.getCurrent()},this.stem=function(){var e=v.cursor;return i(),v.cursor=e,t(),v.limit_backward=e,v.cursor=v.limit,a(),v.cursor=v.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.de.stemmer,"stemmer-de"),e.de.stopWordFilter=e.generateStopWordFilter("aber alle allem allen aller alles als also am an ander andere anderem anderen anderer anderes anderm andern anderr anders auch auf aus bei bin bis bist da damit dann das dasselbe dazu daß dein deine deinem deinen deiner deines dem demselben den denn denselben der derer derselbe derselben des desselben dessen dich die dies diese dieselbe dieselben diesem diesen dieser dieses dir doch dort du durch ein eine einem einen einer eines einig einige einigem einigen einiger einiges einmal er es etwas euch euer eure eurem euren eurer eures für gegen gewesen hab habe haben hat hatte hatten hier hin hinter ich ihm ihn ihnen ihr ihre ihrem ihren ihrer ihres im in indem ins ist jede jedem jeden jeder jedes jene jenem jenen jener jenes jetzt kann kein keine keinem keinen keiner keines können könnte machen man manche manchem manchen mancher manches mein meine meinem meinen meiner meines mich mir mit muss musste nach nicht nichts noch nun nur ob oder ohne sehr sein seine seinem seinen seiner seines selbst sich sie sind so solche solchem solchen solcher solches soll sollte sondern sonst um und uns unse unsem unsen unser unses unter viel vom von vor war waren warst was weg weil weiter welche welchem welchen welcher welches wenn werde werden wie wieder will wir wird wirst wo wollen wollte während würde würden zu zum zur zwar zwischen über".split(" ")),e.Pipeline.registerFunction(e.de.stopWordFilter,"stopWordFilter-de")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.du.min.js b/assets/javascripts/lunr/min/lunr.du.min.js new file mode 100644 index 000000000..49a0f3f0a --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.du.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Dutch` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");console.warn('[Lunr Languages] Please use the "nl" instead of the "du". 
The "nl" code is the standard code for Dutch language, and "du" will be removed in the next major versions.'),e.du=function(){this.pipeline.reset(),this.pipeline.add(e.du.trimmer,e.du.stopWordFilter,e.du.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.du.stemmer))},e.du.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.du.trimmer=e.trimmerSupport.generateTrimmer(e.du.wordCharacters),e.Pipeline.registerFunction(e.du.trimmer,"trimmer-du"),e.du.stemmer=function(){var r=e.stemmerSupport.Among,i=e.stemmerSupport.SnowballProgram,n=new function(){function e(){for(var e,r,i,o=C.cursor;;){if(C.bra=C.cursor,e=C.find_among(b,11))switch(C.ket=C.cursor,e){case 1:C.slice_from("a");continue;case 2:C.slice_from("e");continue;case 3:C.slice_from("i");continue;case 4:C.slice_from("o");continue;case 5:C.slice_from("u");continue;case 6:if(C.cursor>=C.limit)break;C.cursor++;continue}break}for(C.cursor=o,C.bra=o,C.eq_s(1,"y")?(C.ket=C.cursor,C.slice_from("Y")):C.cursor=o;;)if(r=C.cursor,C.in_grouping(q,97,232)){if(i=C.cursor,C.bra=i,C.eq_s(1,"i"))C.ket=C.cursor,C.in_grouping(q,97,232)&&(C.slice_from("I"),C.cursor=r);else if(C.cursor=i,C.eq_s(1,"y"))C.ket=C.cursor,C.slice_from("Y"),C.cursor=r;else if(n(r))break}else if(n(r))break}function n(e){return C.cursor=e,e>=C.limit||(C.cursor++,!1)}function o(){_=C.limit,f=_,t()||(_=C.cursor,_<3&&(_=3),t()||(f=C.cursor))}function t(){for(;!C.in_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}for(;!C.out_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}return!1}function s(){for(var e;;)if(C.bra=C.cursor,e=C.find_among(p,3))switch(C.ket=C.cursor,e){case 1:C.slice_from("y");break;case 2:C.slice_from("i");break;case 3:if(C.cursor>=C.limit)return;C.cursor++}}function u(){return _<=C.cursor}function c(){return f<=C.cursor}function a(){var e=C.limit-C.cursor;C.find_among_b(g,3)&&(C.cursor=C.limit-e,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del()))}function l(){var e;w=!1,C.ket=C.cursor,C.eq_s_b(1,"e")&&(C.bra=C.cursor,u()&&(e=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-e,C.slice_del(),w=!0,a())))}function m(){var e;u()&&(e=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-e,C.eq_s_b(3,"gem")||(C.cursor=C.limit-e,C.slice_del(),a())))}function d(){var e,r,i,n,o,t,s=C.limit-C.cursor;if(C.ket=C.cursor,e=C.find_among_b(h,5))switch(C.bra=C.cursor,e){case 1:u()&&C.slice_from("heid");break;case 2:m();break;case 3:u()&&C.out_grouping_b(z,97,232)&&C.slice_del()}if(C.cursor=C.limit-s,l(),C.cursor=C.limit-s,C.ket=C.cursor,C.eq_s_b(4,"heid")&&(C.bra=C.cursor,c()&&(r=C.limit-C.cursor,C.eq_s_b(1,"c")||(C.cursor=C.limit-r,C.slice_del(),C.ket=C.cursor,C.eq_s_b(2,"en")&&(C.bra=C.cursor,m())))),C.cursor=C.limit-s,C.ket=C.cursor,e=C.find_among_b(k,6))switch(C.bra=C.cursor,e){case 1:if(c()){if(C.slice_del(),i=C.limit-C.cursor,C.ket=C.cursor,C.eq_s_b(2,"ig")&&(C.bra=C.cursor,c()&&(n=C.limit-C.cursor,!C.eq_s_b(1,"e")))){C.cursor=C.limit-n,C.slice_del();break}C.cursor=C.limit-i,a()}break;case 2:c()&&(o=C.limit-C.cursor,C.eq_s_b(1,"e")||(C.cursor=C.limit-o,C.slice_del()));break;case 3:c()&&(C.slice_del(),l());break;case 4:c()&&C.slice_del();break;case 5:c()&&w&&C.slice_del()}C.cursor=C.limit-s,C.out_grouping_b(j,73,232)&&(t=C.limit-C.cursor,C.find_among_b(v,4)&&C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-t,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del())))}var f,_,w,b=[new r("",-1,6),new 
r("á",0,1),new r("ä",0,1),new r("é",0,2),new r("ë",0,2),new r("í",0,3),new r("ï",0,3),new r("ó",0,4),new r("ö",0,4),new r("ú",0,5),new r("ü",0,5)],p=[new r("",-1,3),new r("I",0,2),new r("Y",0,1)],g=[new r("dd",-1,-1),new r("kk",-1,-1),new r("tt",-1,-1)],h=[new r("ene",-1,2),new r("se",-1,3),new r("en",-1,2),new r("heden",2,1),new r("s",-1,3)],k=[new r("end",-1,1),new r("ig",-1,2),new r("ing",-1,1),new r("lijk",-1,3),new r("baar",-1,4),new r("bar",-1,5)],v=[new r("aa",-1,-1),new r("ee",-1,-1),new r("oo",-1,-1),new r("uu",-1,-1)],q=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],j=[1,0,0,17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],z=[17,67,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],C=new i;this.setCurrent=function(e){C.setCurrent(e)},this.getCurrent=function(){return C.getCurrent()},this.stem=function(){var r=C.cursor;return e(),C.cursor=r,o(),C.limit_backward=r,C.cursor=C.limit,d(),C.cursor=C.limit_backward,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.du.stemmer,"stemmer-du"),e.du.stopWordFilter=e.generateStopWordFilter(" aan al alles als altijd andere ben bij daar dan dat de der deze die dit doch doen door dus een eens en er ge geen geweest haar had heb hebben heeft hem het hier hij hoe hun iemand iets ik in is ja je kan kon kunnen maar me meer men met mij mijn moet na naar niet niets nog nu of om omdat onder ons ook op over reeds te tegen toch toen tot u uit uw van veel voor want waren was wat werd wezen wie wil worden wordt zal ze zelf zich zij zijn zo zonder zou".split(" ")),e.Pipeline.registerFunction(e.du.stopWordFilter,"stopWordFilter-du")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.el.min.js b/assets/javascripts/lunr/min/lunr.el.min.js new file mode 100644 index 000000000..ace017bd6 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.el.min.js @@ -0,0 +1 @@ +!function(e,t){"function"==typeof define&&define.amd?define(t):"object"==typeof exports?module.exports=t():t()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.el=function(){this.pipeline.reset(),void 0===this.searchPipeline&&this.pipeline.add(e.el.trimmer,e.el.normilizer),this.pipeline.add(e.el.stopWordFilter,e.el.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.el.stemmer))},e.el.wordCharacters="A-Za-zΑαΒβΓγΔδΕεΖζΗηΘθΙιΚκΛλΜμΝνΞξΟοΠπΡρΣσςΤτΥυΦφΧχΨψΩωΆάΈέΉήΊίΌόΎύΏώΪΐΫΰΐΰ",e.el.trimmer=e.trimmerSupport.generateTrimmer(e.el.wordCharacters),e.Pipeline.registerFunction(e.el.trimmer,"trimmer-el"),e.el.stemmer=function(){function e(e){return s.test(e)}function t(e){return/[ΑΕΗΙΟΥΩ]$/.test(e)}function r(e){return/[ΑΕΗΙΟΩ]$/.test(e)}function n(n){var s=n;if(n.length<3)return s;if(!e(n))return s;if(i.indexOf(n)>=0)return s;var u=new RegExp("(.*)("+Object.keys(l).join("|")+")$"),o=u.exec(s);return null!==o&&(s=o[1]+l[o[2]]),null!==(o=/^(.+?)(ΑΔΕΣ|ΑΔΩΝ)$/.exec(s))&&(s=o[1],/(ΟΚ|ΜΑΜ|ΜΑΝ|ΜΠΑΜΠ|ΠΑΤΕΡ|ΓΙΑΓΙ|ΝΤΑΝΤ|ΚΥΡ|ΘΕΙ|ΠΕΘΕΡ|ΜΟΥΣΑΜ|ΚΑΠΛΑΜ|ΠΑΡ|ΨΑΡ|ΤΖΟΥΡ|ΤΑΜΠΟΥΡ|ΓΑΛΑΤ|ΦΑΦΛΑΤ)$/.test(o[1])||(s+="ΑΔ")),null!==(o=/^(.+?)(ΕΔΕΣ|ΕΔΩΝ)$/.exec(s))&&(s=o[1],/(ΟΠ|ΙΠ|ΕΜΠ|ΥΠ|ΓΗΠ|ΔΑΠ|ΚΡΑΣΠ|ΜΙΛ)$/.test(o[1])&&(s+="ΕΔ")),null!==(o=/^(.+?)(ΟΥΔΕΣ|ΟΥΔΩΝ)$/.exec(s))&&(s=o[1],/(ΑΡΚ|ΚΑΛΙΑΚ|ΠΕΤΑΛ|ΛΙΧ|ΠΛΕΞ|ΣΚ|Σ|ΦΛ|ΦΡ|ΒΕΛ|ΛΟΥΛ|ΧΝ|ΣΠ|ΤΡΑΓ|ΦΕ)$/.test(o[1])&&(s+="ΟΥΔ")),null!==(o=/^(.+?)(ΕΩΣ|ΕΩΝ|ΕΑΣ|ΕΑ)$/.exec(s))&&(s=o[1],/^(Θ|Δ|ΕΛ|ΓΑΛ|Ν|Π|ΙΔ|ΠΑΡ|ΣΤΕΡ|ΟΡΦ|ΑΝΔΡ|ΑΝΤΡ)$/.test(o[1])&&(s+="Ε")),null!==(o=/^(.+?)(ΕΙΟ|ΕΙΟΣ|ΕΙΟΙ|ΕΙΑ|ΕΙΑΣ|ΕΙΕΣ|ΕΙΟΥ|ΕΙΟΥΣ|ΕΙΩΝ)$/.exec(s))&&o[1].length>4&&(s=o[1]),null!==(o=/^(.+?)(ΙΟΥΣ|ΙΑΣ|ΙΕΣ|ΙΟΣ|ΙΟΥ|ΙΟΙ|ΙΩΝ|ΙΟΝ|ΙΑ|ΙΟ)$/.exec(s))&&(s=o[1],(t(s)||s.length<2||/^(ΑΓ|ΑΓΓΕΛ|ΑΓΡ|ΑΕΡ|ΑΘΛ|ΑΚΟΥΣ|ΑΞ|ΑΣ|Β|ΒΙΒΛ|ΒΥΤ|Γ|ΓΙΑΓ|ΓΩΝ|Δ|ΔΑΝ|ΔΗΛ|ΔΗΜ|ΔΟΚΙΜ|ΕΛ|ΖΑΧΑΡ|ΗΛ|ΗΠ|ΙΔ|ΙΣΚ|ΙΣΤ|ΙΟΝ|ΙΩΝ|ΚΙΜΩΛ|ΚΟΛΟΝ|ΚΟΡ|ΚΤΗΡ|ΚΥΡ|ΛΑΓ|ΛΟΓ|ΜΑΓ|ΜΠΑΝ|ΜΠΡ|ΝΑΥΤ|ΝΟΤ|ΟΠΑΛ|ΟΞ|ΟΡ|ΟΣ|ΠΑΝΑΓ|ΠΑΤΡ|ΠΗΛ|ΠΗΝ|ΠΛΑΙΣ|ΠΟΝΤ|ΡΑΔ|ΡΟΔ|ΣΚ|ΣΚΟΡΠ|ΣΟΥΝ|ΣΠΑΝ|ΣΤΑΔ|ΣΥΡ|ΤΗΛ|ΤΙΜ|ΤΟΚ|ΤΟΠ|ΤΡΟΧ|ΦΙΛ|ΦΩΤ|Χ|ΧΙΛ|ΧΡΩΜ|ΧΩΡ)$/.test(o[1]))&&(s+="Ι"),/^(ΠΑΛ)$/.test(o[1])&&(s+="ΑΙ")),null!==(o=/^(.+?)(ΙΚΟΣ|ΙΚΟΝ|ΙΚΕΙΣ|ΙΚΟΙ|ΙΚΕΣ|ΙΚΟΥΣ|ΙΚΗ|ΙΚΗΣ|ΙΚΟ|ΙΚΑ|ΙΚΟΥ|ΙΚΩΝ|ΙΚΩΣ)$/.exec(s))&&(s=o[1],(t(s)||/^(ΑΔ|ΑΛ|ΑΜΑΝ|ΑΜΕΡ|ΑΜΜΟΧΑΛ|ΑΝΗΘ|ΑΝΤΙΔ|ΑΠΛ|ΑΤΤ|ΑΦΡ|ΒΑΣ|ΒΡΩΜ|ΓΕΝ|ΓΕΡ|Δ|ΔΙΚΑΝ|ΔΥΤ|ΕΙΔ|ΕΝΔ|ΕΞΩΔ|ΗΘ|ΘΕΤ|ΚΑΛΛΙΝ|ΚΑΛΠ|ΚΑΤΑΔ|ΚΟΥΖΙΝ|ΚΡ|ΚΩΔ|ΛΟΓ|Μ|ΜΕΡ|ΜΟΝΑΔ|ΜΟΥΛ|ΜΟΥΣ|ΜΠΑΓΙΑΤ|ΜΠΑΝ|ΜΠΟΛ|ΜΠΟΣ|ΜΥΣΤ|Ν|ΝΙΤ|ΞΙΚ|ΟΠΤ|ΠΑΝ|ΠΕΤΣ|ΠΙΚΑΝΤ|ΠΙΤΣ|ΠΛΑΣΤ|ΠΛΙΑΤΣ|ΠΟΝΤ|ΠΟΣΤΕΛΝ|ΠΡΩΤΟΔ|ΣΕΡΤ|ΣΗΜΑΝΤ|ΣΤΑΤ|ΣΥΝΑΔ|ΣΥΝΟΜΗΛ|ΤΕΛ|ΤΕΧΝ|ΤΡΟΠ|ΤΣΑΜ|ΥΠΟΔ|Φ|ΦΙΛΟΝ|ΦΥΛΟΔ|ΦΥΣ|ΧΑΣ)$/.test(o[1])||/(ΦΟΙΝ)$/.test(o[1]))&&(s+="ΙΚ")),"ΑΓΑΜΕ"===s&&(s="ΑΓΑΜ"),null!==(o=/^(.+?)(ΑΓΑΜΕ|ΗΣΑΜΕ|ΟΥΣΑΜΕ|ΗΚΑΜΕ|ΗΘΗΚΑΜΕ)$/.exec(s))&&(s=o[1]),null!==(o=/^(.+?)(ΑΜΕ)$/.exec(s))&&(s=o[1],/^(ΑΝΑΠ|ΑΠΟΘ|ΑΠΟΚ|ΑΠΟΣΤ|ΒΟΥΒ|ΞΕΘ|ΟΥΛ|ΠΕΘ|ΠΙΚΡ|ΠΟΤ|ΣΙΧ|Χ)$/.test(o[1])&&(s+="ΑΜ")),null!==(o=/^(.+?)(ΑΓΑΝΕ|ΗΣΑΝΕ|ΟΥΣΑΝΕ|ΙΟΝΤΑΝΕ|ΙΟΤΑΝΕ|ΙΟΥΝΤΑΝΕ|ΟΝΤΑΝΕ|ΟΤΑΝΕ|ΟΥΝΤΑΝΕ|ΗΚΑΝΕ|ΗΘΗΚΑΝΕ)$/.exec(s))&&(s=o[1],/^(ΤΡ|ΤΣ)$/.test(o[1])&&(s+="ΑΓΑΝ")),null!==(o=/^(.+?)(ΑΝΕ)$/.exec(s))&&(s=o[1],(r(s)||/^(ΒΕΤΕΡ|ΒΟΥΛΚ|ΒΡΑΧΜ|Γ|ΔΡΑΔΟΥΜ|Θ|ΚΑΛΠΟΥΖ|ΚΑΣΤΕΛ|ΚΟΡΜΟΡ|ΛΑΟΠΛ|ΜΩΑΜΕΘ|Μ|ΜΟΥΣΟΥΛΜΑΝ|ΟΥΛ|Π|ΠΕΛΕΚ|ΠΛ|ΠΟΛΙΣ|ΠΟΡΤΟΛ|ΣΑΡΑΚΑΤΣ|ΣΟΥΛΤ|ΤΣΑΡΛΑΤ|ΟΡΦ|ΤΣΙΓΓ|ΤΣΟΠ|ΦΩΤΟΣΤΕΦ|Χ|ΨΥΧΟΠΛ|ΑΓ|ΟΡΦ|ΓΑΛ|ΓΕΡ|ΔΕΚ|ΔΙΠΛ|ΑΜΕΡΙΚΑΝ|ΟΥΡ|ΠΙΘ|ΠΟΥΡΙΤ|Σ|ΖΩΝΤ|ΙΚ|ΚΑΣΤ|ΚΟΠ|ΛΙΧ|ΛΟΥΘΗΡ|ΜΑΙΝΤ|ΜΕΛ|ΣΙΓ|ΣΠ|ΣΤΕΓ|ΤΡΑΓ|ΤΣΑΓ|Φ|ΕΡ|ΑΔΑΠ|ΑΘΙΓΓ|ΑΜΗΧ|ΑΝΙΚ|ΑΝΟΡΓ|ΑΠΗΓ|ΑΠΙΘ|ΑΤΣΙΓΓ|ΒΑΣ|ΒΑΣΚ|ΒΑΘΥΓΑΛ|ΒΙΟΜΗΧ|ΒΡΑΧΥΚ|ΔΙΑΤ|ΔΙΑΦ|ΕΝΟΡΓ|ΘΥΣ|ΚΑΠΝΟΒΙΟΜΗΧ|ΚΑΤΑΓΑΛ|ΚΛΙΒ|ΚΟΙΛΑΡΦ|ΛΙΒ|ΜΕΓΛΟΒΙΟΜΗΧ|ΜΙΚΡΟΒΙΟΜΗΧ|ΝΤΑΒ|ΞΗΡΟΚΛΙΒ|ΟΛΙΓΟΔΑΜ|ΟΛΟΓΑΛ|ΠΕΝΤΑΡΦ|ΠΕΡΗΦ|ΠΕΡΙΤΡ|ΠΛΑΤ|ΠΟΛΥΔΑΠ|ΠΟΛΥΜΗΧ|ΣΤΕΦ|ΤΑΒ|ΤΕΤ|ΥΠΕΡΗΦ|ΥΠΟΚΟΠ|ΧΑΜΗΛΟΔΑΠ|ΨΗΛΟΤΑΒ)$/.test(o[1]))&&(s+="ΑΝ")),null!==(o=/^(.+?)(ΗΣΕΤΕ)$/.exec(s))&&(s=o[1]),null!==(o=/^(.+?)(ΕΤΕ)$/.exec(s))&&(s=o[1],(r(s)
||/(ΟΔ|ΑΙΡ|ΦΟΡ|ΤΑΘ|ΔΙΑΘ|ΣΧ|ΕΝΔ|ΕΥΡ|ΤΙΘ|ΥΠΕΡΘ|ΡΑΘ|ΕΝΘ|ΡΟΘ|ΣΘ|ΠΥΡ|ΑΙΝ|ΣΥΝΔ|ΣΥΝ|ΣΥΝΘ|ΧΩΡ|ΠΟΝ|ΒΡ|ΚΑΘ|ΕΥΘ|ΕΚΘ|ΝΕΤ|ΡΟΝ|ΑΡΚ|ΒΑΡ|ΒΟΛ|ΩΦΕΛ)$/.test(o[1])||/^(ΑΒΑΡ|ΒΕΝ|ΕΝΑΡ|ΑΒΡ|ΑΔ|ΑΘ|ΑΝ|ΑΠΛ|ΒΑΡΟΝ|ΝΤΡ|ΣΚ|ΚΟΠ|ΜΠΟΡ|ΝΙΦ|ΠΑΓ|ΠΑΡΑΚΑΛ|ΣΕΡΠ|ΣΚΕΛ|ΣΥΡΦ|ΤΟΚ|Υ|Δ|ΕΜ|ΘΑΡΡ|Θ)$/.test(o[1]))&&(s+="ΕΤ")),null!==(o=/^(.+?)(ΟΝΤΑΣ|ΩΝΤΑΣ)$/.exec(s))&&(s=o[1],/^ΑΡΧ$/.test(o[1])&&(s+="ΟΝΤ"),/ΚΡΕ$/.test(o[1])&&(s+="ΩΝΤ")),null!==(o=/^(.+?)(ΟΜΑΣΤΕ|ΙΟΜΑΣΤΕ)$/.exec(s))&&(s=o[1],/^ΟΝ$/.test(o[1])&&(s+="ΟΜΑΣΤ")),null!==(o=/^(.+?)(ΙΕΣΤΕ)$/.exec(s))&&(s=o[1],/^(Π|ΑΠ|ΣΥΜΠ|ΑΣΥΜΠ|ΑΚΑΤΑΠ|ΑΜΕΤΑΜΦ)$/.test(o[1])&&(s+="ΙΕΣΤ")),null!==(o=/^(.+?)(ΕΣΤΕ)$/.exec(s))&&(s=o[1],/^(ΑΛ|ΑΡ|ΕΚΤΕΛ|Ζ|Μ|Ξ|ΠΑΡΑΚΑΛ|ΠΡΟ|ΝΙΣ)$/.test(o[1])&&(s+="ΕΣΤ")),null!==(o=/^(.+?)(ΗΘΗΚΑ|ΗΘΗΚΕΣ|ΗΘΗΚΕ)$/.exec(s))&&(s=o[1]),null!==(o=/^(.+?)(ΗΚΑ|ΗΚΕΣ|ΗΚΕ)$/.exec(s))&&(s=o[1],(/(ΣΚΩΛ|ΣΚΟΥΛ|ΝΑΡΘ|ΣΦ|ΟΘ|ΠΙΘ)$/.test(o[1])||/^(ΔΙΑΘ|Θ|ΠΑΡΑΚΑΤΑΘ|ΠΡΟΣΘ|ΣΥΝΘ)$/.test(o[1]))&&(s+="ΗΚ")),null!==(o=/^(.+?)(ΟΥΣΑ|ΟΥΣΕΣ|ΟΥΣΕ)$/.exec(s))&&(s=o[1],(t(s)||/^(ΦΑΡΜΑΚ|ΧΑΔ|ΑΓΚ|ΑΝΑΡΡ|ΒΡΟΜ|ΕΚΛΙΠ|ΛΑΜΠΙΔ|ΛΕΧ|Μ|ΠΑΤ|Ρ|Λ|ΜΕΔ|ΜΕΣΑΖ|ΥΠΟΤΕΙΝ|ΑΜ|ΑΙΘ|ΑΝΗΚ|ΔΕΣΠΟΖ|ΕΝΔΙΑΦΕΡ)$/.test(o[1])||/(ΠΟΔΑΡ|ΒΛΕΠ|ΠΑΝΤΑΧ|ΦΡΥΔ|ΜΑΝΤΙΛ|ΜΑΛΛ|ΚΥΜΑΤ|ΛΑΧ|ΛΗΓ|ΦΑΓ|ΟΜ|ΠΡΩΤ)$/.test(o[1]))&&(s+="ΟΥΣ")),null!==(o=/^(.+?)(ΑΓΑ|ΑΓΕΣ|ΑΓΕ)$/.exec(s))&&(s=o[1],(/^(ΑΒΑΣΤ|ΠΟΛΥΦ|ΑΔΗΦ|ΠΑΜΦ|Ρ|ΑΣΠ|ΑΦ|ΑΜΑΛ|ΑΜΑΛΛΙ|ΑΝΥΣΤ|ΑΠΕΡ|ΑΣΠΑΡ|ΑΧΑΡ|ΔΕΡΒΕΝ|ΔΡΟΣΟΠ|ΞΕΦ|ΝΕΟΠ|ΝΟΜΟΤ|ΟΛΟΠ|ΟΜΟΤ|ΠΡΟΣΤ|ΠΡΟΣΩΠΟΠ|ΣΥΜΠ|ΣΥΝΤ|Τ|ΥΠΟΤ|ΧΑΡ|ΑΕΙΠ|ΑΙΜΟΣΤ|ΑΝΥΠ|ΑΠΟΤ|ΑΡΤΙΠ|ΔΙΑΤ|ΕΝ|ΕΠΙΤ|ΚΡΟΚΑΛΟΠ|ΣΙΔΗΡΟΠ|Λ|ΝΑΥ|ΟΥΛΑΜ|ΟΥΡ|Π|ΤΡ|Μ)$/.test(o[1])||/(ΟΦ|ΠΕΛ|ΧΟΡΤ|ΛΛ|ΣΦ|ΡΠ|ΦΡ|ΠΡ|ΛΟΧ|ΣΜΗΝ)$/.test(o[1])&&!/^(ΨΟΦ|ΝΑΥΛΟΧ)$/.test(o[1])||/(ΚΟΛΛ)$/.test(o[1]))&&(s+="ΑΓ")),null!==(o=/^(.+?)(ΗΣΕ|ΗΣΟΥ|ΗΣΑ)$/.exec(s))&&(s=o[1],/^(Ν|ΧΕΡΣΟΝ|ΔΩΔΕΚΑΝ|ΕΡΗΜΟΝ|ΜΕΓΑΛΟΝ|ΕΠΤΑΝ|Ι)$/.test(o[1])&&(s+="ΗΣ")),null!==(o=/^(.+?)(ΗΣΤΕ)$/.exec(s))&&(s=o[1],/^(ΑΣΒ|ΣΒ|ΑΧΡ|ΧΡ|ΑΠΛ|ΑΕΙΜΝ|ΔΥΣΧΡ|ΕΥΧΡ|ΚΟΙΝΟΧΡ|ΠΑΛΙΜΨ)$/.test(o[1])&&(s+="ΗΣΤ")),null!==(o=/^(.+?)(ΟΥΝΕ|ΗΣΟΥΝΕ|ΗΘΟΥΝΕ)$/.exec(s))&&(s=o[1],/^(Ν|Ρ|ΣΠΙ|ΣΤΡΑΒΟΜΟΥΤΣ|ΚΑΚΟΜΟΥΤΣ|ΕΞΩΝ)$/.test(o[1])&&(s+="ΟΥΝ")),null!==(o=/^(.+?)(ΟΥΜΕ|ΗΣΟΥΜΕ|ΗΘΟΥΜΕ)$/.exec(s))&&(s=o[1],/^(ΠΑΡΑΣΟΥΣ|Φ|Χ|ΩΡΙΟΠΛ|ΑΖ|ΑΛΛΟΣΟΥΣ|ΑΣΟΥΣ)$/.test(o[1])&&(s+="ΟΥΜ")),null!=(o=/^(.+?)(ΜΑΤΟΙ|ΜΑΤΟΥΣ|ΜΑΤΟ|ΜΑΤΑ|ΜΑΤΩΣ|ΜΑΤΩΝ|ΜΑΤΟΣ|ΜΑΤΕΣ|ΜΑΤΗ|ΜΑΤΗΣ|ΜΑΤΟΥ)$/.exec(s))&&(s=o[1]+"Μ",/^(ΓΡΑΜ)$/.test(o[1])?s+="Α":/^(ΓΕ|ΣΤΑ)$/.test(o[1])&&(s+="ΑΤ")),null!==(o=/^(.+?)(ΟΥΑ)$/.exec(s))&&(s=o[1]+"ΟΥ"),n.length===s.length&&null!==(o=/^(.+?)(Α|ΑΓΑΤΕ|ΑΓΑΝ|ΑΕΙ|ΑΜΑΙ|ΑΝ|ΑΣ|ΑΣΑΙ|ΑΤΑΙ|ΑΩ|Ε|ΕΙ|ΕΙΣ|ΕΙΤΕ|ΕΣΑΙ|ΕΣ|ΕΤΑΙ|Ι|ΙΕΜΑΙ|ΙΕΜΑΣΤΕ|ΙΕΤΑΙ|ΙΕΣΑΙ|ΙΕΣΑΣΤΕ|ΙΟΜΑΣΤΑΝ|ΙΟΜΟΥΝ|ΙΟΜΟΥΝΑ|ΙΟΝΤΑΝ|ΙΟΝΤΟΥΣΑΝ|ΙΟΣΑΣΤΑΝ|ΙΟΣΑΣΤΕ|ΙΟΣΟΥΝ|ΙΟΣΟΥΝΑ|ΙΟΤΑΝ|ΙΟΥΜΑ|ΙΟΥΜΑΣΤΕ|ΙΟΥΝΤΑΙ|ΙΟΥΝΤΑΝ|Η|ΗΔΕΣ|ΗΔΩΝ|ΗΘΕΙ|ΗΘΕΙΣ|ΗΘΕΙΤΕ|ΗΘΗΚΑΤΕ|ΗΘΗΚΑΝ|ΗΘΟΥΝ|ΗΘΩ|ΗΚΑΤΕ|ΗΚΑΝ|ΗΣ|ΗΣΑΝ|ΗΣΑΤΕ|ΗΣΕΙ|ΗΣΕΣ|ΗΣΟΥΝ|ΗΣΩ|Ο|ΟΙ|ΟΜΑΙ|ΟΜΑΣΤΑΝ|ΟΜΟΥΝ|ΟΜΟΥΝΑ|ΟΝΤΑΙ|ΟΝΤΑΝ|ΟΝΤΟΥΣΑΝ|ΟΣ|ΟΣΑΣΤΑΝ|ΟΣΑΣΤΕ|ΟΣΟΥΝ|ΟΣΟΥΝΑ|ΟΤΑΝ|ΟΥ|ΟΥΜΑΙ|ΟΥΜΑΣΤΕ|ΟΥΝ|ΟΥΝΤΑΙ|ΟΥΝΤΑΝ|ΟΥΣ|ΟΥΣΑΝ|ΟΥΣΑΤΕ|Υ||ΥΑ|ΥΣ|Ω|ΩΝ|ΟΙΣ)$/.exec(s))&&(s=o[1]),null!=(o=/^(.+?)(ΕΣΤΕΡ|ΕΣΤΑΤ|ΟΤΕΡ|ΟΤΑΤ|ΥΤΕΡ|ΥΤΑΤ|ΩΤΕΡ|ΩΤΑΤ)$/.exec(s))&&(/^(ΕΞ|ΕΣ|ΑΝ|ΚΑΤ|Κ|ΠΡ)$/.test(o[1])||(s=o[1]),/^(ΚΑ|Μ|ΕΛΕ|ΛΕ|ΔΕ)$/.test(o[1])&&(s+="ΥΤ")),s}var 
l={"ΦΑΓΙΑ":"ΦΑ","ΦΑΓΙΟΥ":"ΦΑ","ΦΑΓΙΩΝ":"ΦΑ","ΣΚΑΓΙΑ":"ΣΚΑ","ΣΚΑΓΙΟΥ":"ΣΚΑ","ΣΚΑΓΙΩΝ":"ΣΚΑ","ΣΟΓΙΟΥ":"ΣΟ","ΣΟΓΙΑ":"ΣΟ","ΣΟΓΙΩΝ":"ΣΟ","ΤΑΤΟΓΙΑ":"ΤΑΤΟ","ΤΑΤΟΓΙΟΥ":"ΤΑΤΟ","ΤΑΤΟΓΙΩΝ":"ΤΑΤΟ","ΚΡΕΑΣ":"ΚΡΕ","ΚΡΕΑΤΟΣ":"ΚΡΕ","ΚΡΕΑΤΑ":"ΚΡΕ","ΚΡΕΑΤΩΝ":"ΚΡΕ","ΠΕΡΑΣ":"ΠΕΡ","ΠΕΡΑΤΟΣ":"ΠΕΡ","ΠΕΡΑΤΑ":"ΠΕΡ","ΠΕΡΑΤΩΝ":"ΠΕΡ","ΤΕΡΑΣ":"ΤΕΡ","ΤΕΡΑΤΟΣ":"ΤΕΡ","ΤΕΡΑΤΑ":"ΤΕΡ","ΤΕΡΑΤΩΝ":"ΤΕΡ","ΦΩΣ":"ΦΩ","ΦΩΤΟΣ":"ΦΩ","ΦΩΤΑ":"ΦΩ","ΦΩΤΩΝ":"ΦΩ","ΚΑΘΕΣΤΩΣ":"ΚΑΘΕΣΤ","ΚΑΘΕΣΤΩΤΟΣ":"ΚΑΘΕΣΤ","ΚΑΘΕΣΤΩΤΑ":"ΚΑΘΕΣΤ","ΚΑΘΕΣΤΩΤΩΝ":"ΚΑΘΕΣΤ","ΓΕΓΟΝΟΣ":"ΓΕΓΟΝ","ΓΕΓΟΝΟΤΟΣ":"ΓΕΓΟΝ","ΓΕΓΟΝΟΤΑ":"ΓΕΓΟΝ","ΓΕΓΟΝΟΤΩΝ":"ΓΕΓΟΝ","ΕΥΑ":"ΕΥ"},i=["ΑΚΡΙΒΩΣ","ΑΛΑ","ΑΛΛΑ","ΑΛΛΙΩΣ","ΑΛΛΟΤΕ","ΑΜΑ","ΑΝΩ","ΑΝΑ","ΑΝΑΜΕΣΑ","ΑΝΑΜΕΤΑΞΥ","ΑΝΕΥ","ΑΝΤΙ","ΑΝΤΙΠΕΡΑ","ΑΝΤΙΟ","ΑΞΑΦΝΑ","ΑΠΟ","ΑΠΟΨΕ","ΑΡΑ","ΑΡΑΓΕ","ΑΥΡΙΟ","ΑΦΟΙ","ΑΦΟΥ","ΑΦΟΤΟΥ","ΒΡΕ","ΓΕΙΑ","ΓΙΑ","ΓΙΑΤΙ","ΓΡΑΜΜΑ","ΔΕΗ","ΔΕΝ","ΔΗΛΑΔΗ","ΔΙΧΩΣ","ΔΥΟ","ΕΑΝ","ΕΓΩ","ΕΔΩ","ΕΔΑ","ΕΙΘΕ","ΕΙΜΑΙ","ΕΙΜΑΣΤΕ","ΕΙΣΑΙ","ΕΙΣΑΣΤΕ","ΕΙΝΑΙ","ΕΙΣΤΕ","ΕΙΤΕ","ΕΚΕΙ","ΕΚΟ","ΕΛΑ","ΕΜΑΣ","ΕΜΕΙΣ","ΕΝΤΕΛΩΣ","ΕΝΤΟΣ","ΕΝΤΩΜΕΤΑΞΥ","ΕΝΩ","ΕΞΙ","ΕΞΙΣΟΥ","ΕΞΗΣ","ΕΞΩ","ΕΟΚ","ΕΠΑΝΩ","ΕΠΕΙΔΗ","ΕΠΕΙΤΑ","ΕΠΙ","ΕΠΙΣΗΣ","ΕΠΟΜΕΝΩΣ","ΕΠΤΑ","ΕΣΑΣ","ΕΣΕΙΣ","ΕΣΤΩ","ΕΣΥ","ΕΣΩ","ΕΤΣΙ","ΕΥΓΕ","ΕΦΕ","ΕΦΕΞΗΣ","ΕΧΤΕΣ","ΕΩΣ","ΗΔΗ","ΗΜΙ","ΗΠΑ","ΗΤΟΙ","ΘΕΣ","ΙΔΙΩΣ","ΙΔΗ","ΙΚΑ","ΙΣΩΣ","ΚΑΘΕ","ΚΑΘΕΤΙ","ΚΑΘΟΛΟΥ","ΚΑΘΩΣ","ΚΑΙ","ΚΑΝ","ΚΑΠΟΤΕ","ΚΑΠΟΥ","ΚΑΤΑ","ΚΑΤΙ","ΚΑΤΟΠΙΝ","ΚΑΤΩ","ΚΕΙ","ΚΙΧ","ΚΚΕ","ΚΟΛΑΝ","ΚΥΡΙΩΣ","ΚΩΣ","ΜΑΚΑΡΙ","ΜΑΛΙΣΤΑ","ΜΑΛΛΟΝ","ΜΑΙ","ΜΑΟ","ΜΑΟΥΣ","ΜΑΣ","ΜΕΘΑΥΡΙΟ","ΜΕΣ","ΜΕΣΑ","ΜΕΤΑ","ΜΕΤΑΞΥ","ΜΕΧΡΙ","ΜΗΔΕ","ΜΗΝ","ΜΗΠΩΣ","ΜΗΤΕ","ΜΙΑ","ΜΙΑΣ","ΜΙΣ","ΜΜΕ","ΜΟΛΟΝΟΤΙ","ΜΟΥ","ΜΠΑ","ΜΠΑΣ","ΜΠΟΥΦΑΝ","ΜΠΡΟΣ","ΝΑΙ","ΝΕΣ","ΝΤΑ","ΝΤΕ","ΞΑΝΑ","ΟΗΕ","ΟΚΤΩ","ΟΜΩΣ","ΟΝΕ","ΟΠΑ","ΟΠΟΥ","ΟΠΩΣ","ΟΣΟ","ΟΤΑΝ","ΟΤΕ","ΟΤΙ","ΟΥΤΕ","ΟΧΙ","ΠΑΛΙ","ΠΑΝ","ΠΑΝΟ","ΠΑΝΤΟΤΕ","ΠΑΝΤΟΥ","ΠΑΝΤΩΣ","ΠΑΝΩ","ΠΑΡΑ","ΠΕΡΑ","ΠΕΡΙ","ΠΕΡΙΠΟΥ","ΠΙΑ","ΠΙΟ","ΠΙΣΩ","ΠΛΑΙ","ΠΛΕΟΝ","ΠΛΗΝ","ΠΟΤΕ","ΠΟΥ","ΠΡΟ","ΠΡΟΣ","ΠΡΟΧΤΕΣ","ΠΡΟΧΘΕΣ","ΡΟΔΙ","ΠΩΣ","ΣΑΙ","ΣΑΣ","ΣΑΝ","ΣΕΙΣ","ΣΙΑ","ΣΚΙ","ΣΟΙ","ΣΟΥ","ΣΡΙ","ΣΥΝ","ΣΥΝΑΜΑ","ΣΧΕΔΟΝ","ΤΑΔΕ","ΤΑΞΙ","ΤΑΧΑ","ΤΕΙ","ΤΗΝ","ΤΗΣ","ΤΙΠΟΤΑ","ΤΙΠΟΤΕ","ΤΙΣ","ΤΟΝ","ΤΟΤΕ","ΤΟΥ","ΤΟΥΣ","ΤΣΑ","ΤΣΕ","ΤΣΙ","ΤΣΟΥ","ΤΩΝ","ΥΠΟ","ΥΠΟΨΗ","ΥΠΟΨΙΝ","ΥΣΤΕΡΑ","ΦΕΤΟΣ","ΦΙΣ","ΦΠΑ","ΧΑΦ","ΧΘΕΣ","ΧΤΕΣ","ΧΩΡΙΣ","ΩΣ","ΩΣΑΝ","ΩΣΟΤΟΥ","ΩΣΠΟΥ","ΩΣΤΕ","ΩΣΤΟΣΟ"],s=new RegExp("^[ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩ]+$");return function(e){return"function"==typeof e.update?e.update(function(e){return n(e.toUpperCase()).toLowerCase()}):n(e.toUpperCase()).toLowerCase()}}(),e.Pipeline.registerFunction(e.el.stemmer,"stemmer-el"),e.el.stopWordFilter=e.generateStopWordFilter("αλλα αν αντι απο αυτα αυτεσ αυτη αυτο αυτοι αυτοσ αυτουσ αυτων για δε δεν εαν ειμαι ειμαστε ειναι εισαι ειστε εκεινα εκεινεσ εκεινη εκεινο εκεινοι εκεινοσ εκεινουσ εκεινων ενω επι η θα ισωσ κ και κατα κι μα με μετα μη μην να ο οι ομωσ οπωσ οσο οτι παρα ποια ποιεσ ποιο ποιοι ποιοσ ποιουσ ποιων που προσ πωσ σε στη στην στο στον τα την τησ το τον τοτε του των ωσ".split(" ")),e.Pipeline.registerFunction(e.el.stopWordFilter,"stopWordFilter-el"),e.el.normilizer=function(){var e={"Ά":"Α","ά":"α","Έ":"Ε","έ":"ε","Ή":"Η","ή":"η","Ί":"Ι","ί":"ι","Ό":"Ο","ο":"ο","Ύ":"Υ","ύ":"υ","Ώ":"Ω","ώ":"ω","Ϊ":"Ι","ϊ":"ι","Ϋ":"Υ","ϋ":"υ","ΐ":"ι","ΰ":"υ"};return function(t){if("function"==typeof t.update)return t.update(function(t){for(var r="",n=0;n=A.limit)return!0;A.cursor++}return!1}return!0}function n(){if(A.in_grouping(x,97,252)){var s=A.cursor;if(e()){if(A.cursor=s,!A.in_grouping(x,97,252))return!0;for(;!A.out_grouping(x,97,252);){if(A.cursor>=A.limit)return!0;A.cursor++}}return!1}return!0}function i(){var 
s,r=A.cursor;if(n()){if(A.cursor=r,!A.out_grouping(x,97,252))return;if(s=A.cursor,e()){if(A.cursor=s,!A.in_grouping(x,97,252)||A.cursor>=A.limit)return;A.cursor++}}g=A.cursor}function a(){for(;!A.in_grouping(x,97,252);){if(A.cursor>=A.limit)return!1;A.cursor++}for(;!A.out_grouping(x,97,252);){if(A.cursor>=A.limit)return!1;A.cursor++}return!0}function t(){var e=A.cursor;g=A.limit,p=g,v=g,i(),A.cursor=e,a()&&(p=A.cursor,a()&&(v=A.cursor))}function o(){for(var e;;){if(A.bra=A.cursor,e=A.find_among(k,6))switch(A.ket=A.cursor,e){case 1:A.slice_from("a");continue;case 2:A.slice_from("e");continue;case 3:A.slice_from("i");continue;case 4:A.slice_from("o");continue;case 5:A.slice_from("u");continue;case 6:if(A.cursor>=A.limit)break;A.cursor++;continue}break}}function u(){return g<=A.cursor}function w(){return p<=A.cursor}function c(){return v<=A.cursor}function m(){var e;if(A.ket=A.cursor,A.find_among_b(y,13)&&(A.bra=A.cursor,(e=A.find_among_b(q,11))&&u()))switch(e){case 1:A.bra=A.cursor,A.slice_from("iendo");break;case 2:A.bra=A.cursor,A.slice_from("ando");break;case 3:A.bra=A.cursor,A.slice_from("ar");break;case 4:A.bra=A.cursor,A.slice_from("er");break;case 5:A.bra=A.cursor,A.slice_from("ir");break;case 6:A.slice_del();break;case 7:A.eq_s_b(1,"u")&&A.slice_del()}}function l(e,s){if(!c())return!0;A.slice_del(),A.ket=A.cursor;var r=A.find_among_b(e,s);return r&&(A.bra=A.cursor,1==r&&c()&&A.slice_del()),!1}function d(e){return!c()||(A.slice_del(),A.ket=A.cursor,A.eq_s_b(2,e)&&(A.bra=A.cursor,c()&&A.slice_del()),!1)}function b(){var e;if(A.ket=A.cursor,e=A.find_among_b(S,46)){switch(A.bra=A.cursor,e){case 1:if(!c())return!1;A.slice_del();break;case 2:if(d("ic"))return!1;break;case 3:if(!c())return!1;A.slice_from("log");break;case 4:if(!c())return!1;A.slice_from("u");break;case 5:if(!c())return!1;A.slice_from("ente");break;case 6:if(!w())return!1;A.slice_del(),A.ket=A.cursor,e=A.find_among_b(C,4),e&&(A.bra=A.cursor,c()&&(A.slice_del(),1==e&&(A.ket=A.cursor,A.eq_s_b(2,"at")&&(A.bra=A.cursor,c()&&A.slice_del()))));break;case 7:if(l(P,3))return!1;break;case 8:if(l(F,3))return!1;break;case 9:if(d("at"))return!1}return!0}return!1}function f(){var e,s;if(A.cursor>=g&&(s=A.limit_backward,A.limit_backward=g,A.ket=A.cursor,e=A.find_among_b(W,12),A.limit_backward=s,e)){if(A.bra=A.cursor,1==e){if(!A.eq_s_b(1,"u"))return!1;A.slice_del()}return!0}return!1}function _(){var e,s,r,n;if(A.cursor>=g&&(s=A.limit_backward,A.limit_backward=g,A.ket=A.cursor,e=A.find_among_b(L,96),A.limit_backward=s,e))switch(A.bra=A.cursor,e){case 1:r=A.limit-A.cursor,A.eq_s_b(1,"u")?(n=A.limit-A.cursor,A.eq_s_b(1,"g")?A.cursor=A.limit-n:A.cursor=A.limit-r):A.cursor=A.limit-r,A.bra=A.cursor;case 2:A.slice_del()}}function h(){var e,s;if(A.ket=A.cursor,e=A.find_among_b(z,8))switch(A.bra=A.cursor,e){case 1:u()&&A.slice_del();break;case 2:u()&&(A.slice_del(),A.ket=A.cursor,A.eq_s_b(1,"u")&&(A.bra=A.cursor,s=A.limit-A.cursor,A.eq_s_b(1,"g")&&(A.cursor=A.limit-s,u()&&A.slice_del())))}}var v,p,g,k=[new s("",-1,6),new s("á",0,1),new s("é",0,2),new s("í",0,3),new s("ó",0,4),new s("ú",0,5)],y=[new s("la",-1,-1),new s("sela",0,-1),new s("le",-1,-1),new s("me",-1,-1),new s("se",-1,-1),new s("lo",-1,-1),new s("selo",5,-1),new s("las",-1,-1),new s("selas",7,-1),new s("les",-1,-1),new s("los",-1,-1),new s("selos",10,-1),new s("nos",-1,-1)],q=[new s("ando",-1,6),new s("iendo",-1,6),new s("yendo",-1,7),new s("ándo",-1,2),new s("iéndo",-1,1),new s("ar",-1,6),new s("er",-1,6),new s("ir",-1,6),new s("ár",-1,3),new s("ér",-1,4),new s("ír",-1,5)],C=[new 
s("ic",-1,-1),new s("ad",-1,-1),new s("os",-1,-1),new s("iv",-1,1)],P=[new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,1)],F=[new s("ic",-1,1),new s("abil",-1,1),new s("iv",-1,1)],S=[new s("ica",-1,1),new s("ancia",-1,2),new s("encia",-1,5),new s("adora",-1,2),new s("osa",-1,1),new s("ista",-1,1),new s("iva",-1,9),new s("anza",-1,1),new s("logía",-1,3),new s("idad",-1,8),new s("able",-1,1),new s("ible",-1,1),new s("ante",-1,2),new s("mente",-1,7),new s("amente",13,6),new s("ación",-1,2),new s("ución",-1,4),new s("ico",-1,1),new s("ismo",-1,1),new s("oso",-1,1),new s("amiento",-1,1),new s("imiento",-1,1),new s("ivo",-1,9),new s("ador",-1,2),new s("icas",-1,1),new s("ancias",-1,2),new s("encias",-1,5),new s("adoras",-1,2),new s("osas",-1,1),new s("istas",-1,1),new s("ivas",-1,9),new s("anzas",-1,1),new s("logías",-1,3),new s("idades",-1,8),new s("ables",-1,1),new s("ibles",-1,1),new s("aciones",-1,2),new s("uciones",-1,4),new s("adores",-1,2),new s("antes",-1,2),new s("icos",-1,1),new s("ismos",-1,1),new s("osos",-1,1),new s("amientos",-1,1),new s("imientos",-1,1),new s("ivos",-1,9)],W=[new s("ya",-1,1),new s("ye",-1,1),new s("yan",-1,1),new s("yen",-1,1),new s("yeron",-1,1),new s("yendo",-1,1),new s("yo",-1,1),new s("yas",-1,1),new s("yes",-1,1),new s("yais",-1,1),new s("yamos",-1,1),new s("yó",-1,1)],L=[new s("aba",-1,2),new s("ada",-1,2),new s("ida",-1,2),new s("ara",-1,2),new s("iera",-1,2),new s("ía",-1,2),new s("aría",5,2),new s("ería",5,2),new s("iría",5,2),new s("ad",-1,2),new s("ed",-1,2),new s("id",-1,2),new s("ase",-1,2),new s("iese",-1,2),new s("aste",-1,2),new s("iste",-1,2),new s("an",-1,2),new s("aban",16,2),new s("aran",16,2),new s("ieran",16,2),new s("ían",16,2),new s("arían",20,2),new s("erían",20,2),new s("irían",20,2),new s("en",-1,1),new s("asen",24,2),new s("iesen",24,2),new s("aron",-1,2),new s("ieron",-1,2),new s("arán",-1,2),new s("erán",-1,2),new s("irán",-1,2),new s("ado",-1,2),new s("ido",-1,2),new s("ando",-1,2),new s("iendo",-1,2),new s("ar",-1,2),new s("er",-1,2),new s("ir",-1,2),new s("as",-1,2),new s("abas",39,2),new s("adas",39,2),new s("idas",39,2),new s("aras",39,2),new s("ieras",39,2),new s("ías",39,2),new s("arías",45,2),new s("erías",45,2),new s("irías",45,2),new s("es",-1,1),new s("ases",49,2),new s("ieses",49,2),new s("abais",-1,2),new s("arais",-1,2),new s("ierais",-1,2),new s("íais",-1,2),new s("aríais",55,2),new s("eríais",55,2),new s("iríais",55,2),new s("aseis",-1,2),new s("ieseis",-1,2),new s("asteis",-1,2),new s("isteis",-1,2),new s("áis",-1,2),new s("éis",-1,1),new s("aréis",64,2),new s("eréis",64,2),new s("iréis",64,2),new s("ados",-1,2),new s("idos",-1,2),new s("amos",-1,2),new s("ábamos",70,2),new s("áramos",70,2),new s("iéramos",70,2),new s("íamos",70,2),new s("aríamos",74,2),new s("eríamos",74,2),new s("iríamos",74,2),new s("emos",-1,1),new s("aremos",78,2),new s("eremos",78,2),new s("iremos",78,2),new s("ásemos",78,2),new s("iésemos",78,2),new s("imos",-1,2),new s("arás",-1,2),new s("erás",-1,2),new s("irás",-1,2),new s("ís",-1,2),new s("ará",-1,2),new s("erá",-1,2),new s("irá",-1,2),new s("aré",-1,2),new s("eré",-1,2),new s("iré",-1,2),new s("ió",-1,2)],z=[new s("a",-1,1),new s("e",-1,2),new s("o",-1,1),new s("os",-1,1),new s("á",-1,1),new s("é",-1,2),new s("í",-1,1),new s("ó",-1,1)],x=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,4,10],A=new r;this.setCurrent=function(e){A.setCurrent(e)},this.getCurrent=function(){return A.getCurrent()},this.stem=function(){var e=A.cursor;return 
t(),A.limit_backward=e,A.cursor=A.limit,m(),A.cursor=A.limit,b()||(A.cursor=A.limit,f()||(A.cursor=A.limit,_())),A.cursor=A.limit,h(),A.cursor=A.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.es.stemmer,"stemmer-es"),e.es.stopWordFilter=e.generateStopWordFilter("a al algo algunas algunos ante antes como con contra cual cuando de del desde donde durante e el ella ellas ellos en entre era erais eran eras eres es esa esas ese eso esos esta estaba estabais estaban estabas estad estada estadas estado estados estamos estando estar estaremos estará estarán estarás estaré estaréis estaría estaríais estaríamos estarían estarías estas este estemos esto estos estoy estuve estuviera estuvierais estuvieran estuvieras estuvieron estuviese estuvieseis estuviesen estuvieses estuvimos estuviste estuvisteis estuviéramos estuviésemos estuvo está estábamos estáis están estás esté estéis estén estés fue fuera fuerais fueran fueras fueron fuese fueseis fuesen fueses fui fuimos fuiste fuisteis fuéramos fuésemos ha habida habidas habido habidos habiendo habremos habrá habrán habrás habré habréis habría habríais habríamos habrían habrías habéis había habíais habíamos habían habías han has hasta hay haya hayamos hayan hayas hayáis he hemos hube hubiera hubierais hubieran hubieras hubieron hubiese hubieseis hubiesen hubieses hubimos hubiste hubisteis hubiéramos hubiésemos hubo la las le les lo los me mi mis mucho muchos muy más mí mía mías mío míos nada ni no nos nosotras nosotros nuestra nuestras nuestro nuestros o os otra otras otro otros para pero poco por porque que quien quienes qué se sea seamos sean seas seremos será serán serás seré seréis sería seríais seríamos serían serías seáis sido siendo sin sobre sois somos son soy su sus suya suyas suyo suyos sí también tanto te tendremos tendrá tendrán tendrás tendré tendréis tendría tendríais tendríamos tendrían tendrías tened tenemos tenga tengamos tengan tengas tengo tengáis tenida tenidas tenido tenidos teniendo tenéis tenía teníais teníamos tenían tenías ti tiene tienen tienes todo todos tu tus tuve tuviera tuvierais tuvieran tuvieras tuvieron tuviese tuvieseis tuviesen tuvieses tuvimos tuviste tuvisteis tuviéramos tuviésemos tuvo tuya tuyas tuyo tuyos tú un una uno unos vosotras vosotros vuestra vuestras vuestro vuestros y ya yo él éramos".split(" ")),e.Pipeline.registerFunction(e.es.stopWordFilter,"stopWordFilter-es")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.fi.min.js b/assets/javascripts/lunr/min/lunr.fi.min.js new file mode 100644 index 000000000..29f5dfcea --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.fi.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Finnish` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(i,e){"function"==typeof define&&define.amd?define(e):"object"==typeof exports?module.exports=e():e()(i.lunr)}(this,function(){return function(i){if(void 0===i)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===i.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");i.fi=function(){this.pipeline.reset(),this.pipeline.add(i.fi.trimmer,i.fi.stopWordFilter,i.fi.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(i.fi.stemmer))},i.fi.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",i.fi.trimmer=i.trimmerSupport.generateTrimmer(i.fi.wordCharacters),i.Pipeline.registerFunction(i.fi.trimmer,"trimmer-fi"),i.fi.stemmer=function(){var e=i.stemmerSupport.Among,r=i.stemmerSupport.SnowballProgram,n=new function(){function i(){f=A.limit,d=f,n()||(f=A.cursor,n()||(d=A.cursor))}function n(){for(var i;;){if(i=A.cursor,A.in_grouping(W,97,246))break;if(A.cursor=i,i>=A.limit)return!0;A.cursor++}for(A.cursor=i;!A.out_grouping(W,97,246);){if(A.cursor>=A.limit)return!0;A.cursor++}return!1}function t(){return d<=A.cursor}function s(){var i,e;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(h,10)){switch(A.bra=A.cursor,A.limit_backward=e,i){case 1:if(!A.in_grouping_b(x,97,246))return;break;case 2:if(!t())return}A.slice_del()}else A.limit_backward=e}function o(){var i,e,r;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(v,9))switch(A.bra=A.cursor,A.limit_backward=e,i){case 1:r=A.limit-A.cursor,A.eq_s_b(1,"k")||(A.cursor=A.limit-r,A.slice_del());break;case 2:A.slice_del(),A.ket=A.cursor,A.eq_s_b(3,"kse")&&(A.bra=A.cursor,A.slice_from("ksi"));break;case 3:A.slice_del();break;case 4:A.find_among_b(p,6)&&A.slice_del();break;case 5:A.find_among_b(g,6)&&A.slice_del();break;case 6:A.find_among_b(j,2)&&A.slice_del()}else A.limit_backward=e}function l(){return A.find_among_b(q,7)}function a(){return A.eq_s_b(1,"i")&&A.in_grouping_b(L,97,246)}function u(){var i,e,r;if(A.cursor>=f)if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,i=A.find_among_b(C,30)){switch(A.bra=A.cursor,A.limit_backward=e,i){case 1:if(!A.eq_s_b(1,"a"))return;break;case 2:case 9:if(!A.eq_s_b(1,"e"))return;break;case 3:if(!A.eq_s_b(1,"i"))return;break;case 4:if(!A.eq_s_b(1,"o"))return;break;case 5:if(!A.eq_s_b(1,"ä"))return;break;case 6:if(!A.eq_s_b(1,"ö"))return;break;case 7:if(r=A.limit-A.cursor,!l()&&(A.cursor=A.limit-r,!A.eq_s_b(2,"ie"))){A.cursor=A.limit-r;break}if(A.cursor=A.limit-r,A.cursor<=A.limit_backward){A.cursor=A.limit-r;break}A.cursor--,A.bra=A.cursor;break;case 8:if(!A.in_grouping_b(W,97,246)||!A.out_grouping_b(W,97,246))return}A.slice_del(),k=!0}else A.limit_backward=e}function c(){var i,e,r;if(A.cursor>=d)if(e=A.limit_backward,A.limit_backward=d,A.ket=A.cursor,i=A.find_among_b(P,14)){if(A.bra=A.cursor,A.limit_backward=e,1==i){if(r=A.limit-A.cursor,A.eq_s_b(2,"po"))return;A.cursor=A.limit-r}A.slice_del()}else A.limit_backward=e}function m(){var i;A.cursor>=f&&(i=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,A.find_among_b(F,2)?(A.bra=A.cursor,A.limit_backward=i,A.slice_del()):A.limit_backward=i)}function w(){var i,e,r,n,t,s;if(A.cursor>=f){if(e=A.limit_backward,A.limit_backward=f,A.ket=A.cursor,A.eq_s_b(1,"t")&&(A.bra=A.cursor,r=A.limit-A.cursor,A.in_grouping_b(W,97,246)&&(A.cursor=A.limit-r,A.slice_del(),A.limit_backward=e,n=A.limit-A.cursor,A.cursor>=d&&(A.cursor=d,t=A.limit_backward,A.limit_backward=A.cursor,A.cursor=A.limit-n,A.ket=A.cursor,i=A.find_among_b(S,2))))){if(A.bra=A.cursor,A.limit_backward=t,1==i){if(s=A.limit-A.cursor,A.eq_s_b(2,"po"))return;A.cursor=A.limit-s}return void A.slice_del()}A.limit_backward=e}}function _(){var 
i,e,r,n;if(A.cursor>=f){for(i=A.limit_backward,A.limit_backward=f,e=A.limit-A.cursor,l()&&(A.cursor=A.limit-e,A.ket=A.cursor,A.cursor>A.limit_backward&&(A.cursor--,A.bra=A.cursor,A.slice_del())),A.cursor=A.limit-e,A.ket=A.cursor,A.in_grouping_b(y,97,228)&&(A.bra=A.cursor,A.out_grouping_b(W,97,246)&&A.slice_del()),A.cursor=A.limit-e,A.ket=A.cursor,A.eq_s_b(1,"j")&&(A.bra=A.cursor,r=A.limit-A.cursor,A.eq_s_b(1,"o")?A.slice_del():(A.cursor=A.limit-r,A.eq_s_b(1,"u")&&A.slice_del())),A.cursor=A.limit-e,A.ket=A.cursor,A.eq_s_b(1,"o")&&(A.bra=A.cursor,A.eq_s_b(1,"j")&&A.slice_del()),A.cursor=A.limit-e,A.limit_backward=i;;){if(n=A.limit-A.cursor,A.out_grouping_b(W,97,246)){A.cursor=A.limit-n;break}if(A.cursor=A.limit-n,A.cursor<=A.limit_backward)return;A.cursor--}A.ket=A.cursor,A.cursor>A.limit_backward&&(A.cursor--,A.bra=A.cursor,b=A.slice_to(),A.eq_v_b(b)&&A.slice_del())}}var k,b,d,f,h=[new e("pa",-1,1),new e("sti",-1,2),new e("kaan",-1,1),new e("han",-1,1),new e("kin",-1,1),new e("hän",-1,1),new e("kään",-1,1),new e("ko",-1,1),new e("pä",-1,1),new e("kö",-1,1)],p=[new e("lla",-1,-1),new e("na",-1,-1),new e("ssa",-1,-1),new e("ta",-1,-1),new e("lta",3,-1),new e("sta",3,-1)],g=[new e("llä",-1,-1),new e("nä",-1,-1),new e("ssä",-1,-1),new e("tä",-1,-1),new e("ltä",3,-1),new e("stä",3,-1)],j=[new e("lle",-1,-1),new e("ine",-1,-1)],v=[new e("nsa",-1,3),new e("mme",-1,3),new e("nne",-1,3),new e("ni",-1,2),new e("si",-1,1),new e("an",-1,4),new e("en",-1,6),new e("än",-1,5),new e("nsä",-1,3)],q=[new e("aa",-1,-1),new e("ee",-1,-1),new e("ii",-1,-1),new e("oo",-1,-1),new e("uu",-1,-1),new e("ää",-1,-1),new e("öö",-1,-1)],C=[new e("a",-1,8),new e("lla",0,-1),new e("na",0,-1),new e("ssa",0,-1),new e("ta",0,-1),new e("lta",4,-1),new e("sta",4,-1),new e("tta",4,9),new e("lle",-1,-1),new e("ine",-1,-1),new e("ksi",-1,-1),new e("n",-1,7),new e("han",11,1),new e("den",11,-1,a),new e("seen",11,-1,l),new e("hen",11,2),new e("tten",11,-1,a),new e("hin",11,3),new e("siin",11,-1,a),new e("hon",11,4),new e("hän",11,5),new e("hön",11,6),new e("ä",-1,8),new e("llä",22,-1),new e("nä",22,-1),new e("ssä",22,-1),new e("tä",22,-1),new e("ltä",26,-1),new e("stä",26,-1),new e("ttä",26,9)],P=[new e("eja",-1,-1),new e("mma",-1,1),new e("imma",1,-1),new e("mpa",-1,1),new e("impa",3,-1),new e("mmi",-1,1),new e("immi",5,-1),new e("mpi",-1,1),new e("impi",7,-1),new e("ejä",-1,-1),new e("mmä",-1,1),new e("immä",10,-1),new e("mpä",-1,1),new e("impä",12,-1)],F=[new e("i",-1,-1),new e("j",-1,-1)],S=[new e("mma",-1,1),new e("imma",0,-1)],y=[17,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8],W=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],L=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],x=[17,97,24,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32],A=new r;this.setCurrent=function(i){A.setCurrent(i)},this.getCurrent=function(){return A.getCurrent()},this.stem=function(){var e=A.cursor;return i(),k=!1,A.limit_backward=e,A.cursor=A.limit,s(),A.cursor=A.limit,o(),A.cursor=A.limit,u(),A.cursor=A.limit,c(),A.cursor=A.limit,k?(m(),A.cursor=A.limit):(A.cursor=A.limit,w(),A.cursor=A.limit),_(),!0}};return function(i){return"function"==typeof i.update?i.update(function(i){return n.setCurrent(i),n.stem(),n.getCurrent()}):(n.setCurrent(i),n.stem(),n.getCurrent())}}(),i.Pipeline.registerFunction(i.fi.stemmer,"stemmer-fi"),i.fi.stopWordFilter=i.generateStopWordFilter("ei eivät emme en et ette että he heidän heidät heihin heille heillä heiltä heissä heistä heitä hän häneen hänelle hänellä häneltä hänen hänessä hänestä hänet häntä itse ja johon joiden joihin joiksi joilla joille joilta 
joina joissa joista joita joka joksi jolla jolle jolta jona jonka jos jossa josta jota jotka kanssa keiden keihin keiksi keille keillä keiltä keinä keissä keistä keitä keneen keneksi kenelle kenellä keneltä kenen kenenä kenessä kenestä kenet ketkä ketkä ketä koska kuin kuka kun me meidän meidät meihin meille meillä meiltä meissä meistä meitä mihin miksi mikä mille millä miltä minkä minkä minua minulla minulle minulta minun minussa minusta minut minuun minä minä missä mistä mitkä mitä mukaan mutta ne niiden niihin niiksi niille niillä niiltä niin niin niinä niissä niistä niitä noiden noihin noiksi noilla noille noilta noin noina noissa noista noita nuo nyt näiden näihin näiksi näille näillä näiltä näinä näissä näistä näitä nämä ole olemme olen olet olette oli olimme olin olisi olisimme olisin olisit olisitte olisivat olit olitte olivat olla olleet ollut on ovat poikki se sekä sen siihen siinä siitä siksi sille sillä sillä siltä sinua sinulla sinulle sinulta sinun sinussa sinusta sinut sinuun sinä sinä sitä tai te teidän teidät teihin teille teillä teiltä teissä teistä teitä tuo tuohon tuoksi tuolla tuolle tuolta tuon tuona tuossa tuosta tuota tähän täksi tälle tällä tältä tämä tämän tänä tässä tästä tätä vaan vai vaikka yli".split(" ")),i.Pipeline.registerFunction(i.fi.stopWordFilter,"stopWordFilter-fi")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.fr.min.js b/assets/javascripts/lunr/min/lunr.fr.min.js new file mode 100644 index 000000000..68cd0094a --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.fr.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `French` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.fr=function(){this.pipeline.reset(),this.pipeline.add(e.fr.trimmer,e.fr.stopWordFilter,e.fr.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.fr.stemmer))},e.fr.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.fr.trimmer=e.trimmerSupport.generateTrimmer(e.fr.wordCharacters),e.Pipeline.registerFunction(e.fr.trimmer,"trimmer-fr"),e.fr.stemmer=function(){var r=e.stemmerSupport.Among,s=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,s){return!(!W.eq_s(1,e)||(W.ket=W.cursor,!W.in_grouping(F,97,251)))&&(W.slice_from(r),W.cursor=s,!0)}function i(e,r,s){return!!W.eq_s(1,e)&&(W.ket=W.cursor,W.slice_from(r),W.cursor=s,!0)}function n(){for(var r,s;;){if(r=W.cursor,W.in_grouping(F,97,251)){if(W.bra=W.cursor,s=W.cursor,e("u","U",r))continue;if(W.cursor=s,e("i","I",r))continue;if(W.cursor=s,i("y","Y",r))continue}if(W.cursor=r,W.bra=r,!e("y","Y",r)){if(W.cursor=r,W.eq_s(1,"q")&&(W.bra=W.cursor,i("u","U",r)))continue;if(W.cursor=r,r>=W.limit)return;W.cursor++}}}function t(){for(;!W.in_grouping(F,97,251);){if(W.cursor>=W.limit)return!0;W.cursor++}for(;!W.out_grouping(F,97,251);){if(W.cursor>=W.limit)return!0;W.cursor++}return!1}function u(){var e=W.cursor;if(q=W.limit,g=q,p=q,W.in_grouping(F,97,251)&&W.in_grouping(F,97,251)&&W.cursor=W.limit){W.cursor=q;break}W.cursor++}while(!W.in_grouping(F,97,251))}q=W.cursor,W.cursor=e,t()||(g=W.cursor,t()||(p=W.cursor))}function o(){for(var e,r;;){if(r=W.cursor,W.bra=r,!(e=W.find_among(h,4)))break;switch(W.ket=W.cursor,e){case 1:W.slice_from("i");break;case 2:W.slice_from("u");break;case 3:W.slice_from("y");break;case 4:if(W.cursor>=W.limit)return;W.cursor++}}}function c(){return q<=W.cursor}function a(){return g<=W.cursor}function l(){return p<=W.cursor}function w(){var e,r;if(W.ket=W.cursor,e=W.find_among_b(C,43)){switch(W.bra=W.cursor,e){case 1:if(!l())return!1;W.slice_del();break;case 2:if(!l())return!1;W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"ic")&&(W.bra=W.cursor,l()?W.slice_del():W.slice_from("iqU"));break;case 3:if(!l())return!1;W.slice_from("log");break;case 4:if(!l())return!1;W.slice_from("u");break;case 5:if(!l())return!1;W.slice_from("ent");break;case 6:if(!c())return!1;if(W.slice_del(),W.ket=W.cursor,e=W.find_among_b(z,6))switch(W.bra=W.cursor,e){case 1:l()&&(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"at")&&(W.bra=W.cursor,l()&&W.slice_del()));break;case 2:l()?W.slice_del():a()&&W.slice_from("eux");break;case 3:l()&&W.slice_del();break;case 4:c()&&W.slice_from("i")}break;case 7:if(!l())return!1;if(W.slice_del(),W.ket=W.cursor,e=W.find_among_b(y,3))switch(W.bra=W.cursor,e){case 1:l()?W.slice_del():W.slice_from("abl");break;case 2:l()?W.slice_del():W.slice_from("iqU");break;case 3:l()&&W.slice_del()}break;case 8:if(!l())return!1;if(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"at")&&(W.bra=W.cursor,l()&&(W.slice_del(),W.ket=W.cursor,W.eq_s_b(2,"ic")))){W.bra=W.cursor,l()?W.slice_del():W.slice_from("iqU");break}break;case 9:W.slice_from("eau");break;case 10:if(!a())return!1;W.slice_from("al");break;case 11:if(l())W.slice_del();else{if(!a())return!1;W.slice_from("eux")}break;case 12:if(!a()||!W.out_grouping_b(F,97,251))return!1;W.slice_del();break;case 13:return c()&&W.slice_from("ant"),!1;case 14:return c()&&W.slice_from("ent"),!1;case 15:return r=W.limit-W.cursor,W.in_grouping_b(F,97,251)&&c()&&(W.cursor=W.limit-r,W.slice_del()),!1}return!0}return!1}function f(){var 
e,r;if(W.cursor=q){if(s=W.limit_backward,W.limit_backward=q,W.ket=W.cursor,e=W.find_among_b(P,7))switch(W.bra=W.cursor,e){case 1:if(l()){if(i=W.limit-W.cursor,!W.eq_s_b(1,"s")&&(W.cursor=W.limit-i,!W.eq_s_b(1,"t")))break;W.slice_del()}break;case 2:W.slice_from("i");break;case 3:W.slice_del();break;case 4:W.eq_s_b(2,"gu")&&W.slice_del()}W.limit_backward=s}}function b(){var e=W.limit-W.cursor;W.find_among_b(U,5)&&(W.cursor=W.limit-e,W.ket=W.cursor,W.cursor>W.limit_backward&&(W.cursor--,W.bra=W.cursor,W.slice_del()))}function d(){for(var e,r=1;W.out_grouping_b(F,97,251);)r--;if(r<=0){if(W.ket=W.cursor,e=W.limit-W.cursor,!W.eq_s_b(1,"é")&&(W.cursor=W.limit-e,!W.eq_s_b(1,"è")))return;W.bra=W.cursor,W.slice_from("e")}}function k(){if(!w()&&(W.cursor=W.limit,!f()&&(W.cursor=W.limit,!m())))return W.cursor=W.limit,void _();W.cursor=W.limit,W.ket=W.cursor,W.eq_s_b(1,"Y")?(W.bra=W.cursor,W.slice_from("i")):(W.cursor=W.limit,W.eq_s_b(1,"ç")&&(W.bra=W.cursor,W.slice_from("c")))}var p,g,q,v=[new r("col",-1,-1),new r("par",-1,-1),new r("tap",-1,-1)],h=[new r("",-1,4),new r("I",0,1),new r("U",0,2),new r("Y",0,3)],z=[new r("iqU",-1,3),new r("abl",-1,3),new r("Ièr",-1,4),new r("ièr",-1,4),new r("eus",-1,2),new r("iv",-1,1)],y=[new r("ic",-1,2),new r("abil",-1,1),new r("iv",-1,3)],C=[new r("iqUe",-1,1),new r("atrice",-1,2),new r("ance",-1,1),new r("ence",-1,5),new r("logie",-1,3),new r("able",-1,1),new r("isme",-1,1),new r("euse",-1,11),new r("iste",-1,1),new r("ive",-1,8),new r("if",-1,8),new r("usion",-1,4),new r("ation",-1,2),new r("ution",-1,4),new r("ateur",-1,2),new r("iqUes",-1,1),new r("atrices",-1,2),new r("ances",-1,1),new r("ences",-1,5),new r("logies",-1,3),new r("ables",-1,1),new r("ismes",-1,1),new r("euses",-1,11),new r("istes",-1,1),new r("ives",-1,8),new r("ifs",-1,8),new r("usions",-1,4),new r("ations",-1,2),new r("utions",-1,4),new r("ateurs",-1,2),new r("ments",-1,15),new r("ements",30,6),new r("issements",31,12),new r("ités",-1,7),new r("ment",-1,15),new r("ement",34,6),new r("issement",35,12),new r("amment",34,13),new r("emment",34,14),new r("aux",-1,10),new r("eaux",39,9),new r("eux",-1,1),new r("ité",-1,7)],x=[new r("ira",-1,1),new r("ie",-1,1),new r("isse",-1,1),new r("issante",-1,1),new r("i",-1,1),new r("irai",4,1),new r("ir",-1,1),new r("iras",-1,1),new r("ies",-1,1),new r("îmes",-1,1),new r("isses",-1,1),new r("issantes",-1,1),new r("îtes",-1,1),new r("is",-1,1),new r("irais",13,1),new r("issais",13,1),new r("irions",-1,1),new r("issions",-1,1),new r("irons",-1,1),new r("issons",-1,1),new r("issants",-1,1),new r("it",-1,1),new r("irait",21,1),new r("issait",21,1),new r("issant",-1,1),new r("iraIent",-1,1),new r("issaIent",-1,1),new r("irent",-1,1),new r("issent",-1,1),new r("iront",-1,1),new r("ît",-1,1),new r("iriez",-1,1),new r("issiez",-1,1),new r("irez",-1,1),new r("issez",-1,1)],I=[new r("a",-1,3),new r("era",0,2),new r("asse",-1,3),new r("ante",-1,3),new r("ée",-1,2),new r("ai",-1,3),new r("erai",5,2),new r("er",-1,2),new r("as",-1,3),new r("eras",8,2),new r("âmes",-1,3),new r("asses",-1,3),new r("antes",-1,3),new r("âtes",-1,3),new r("ées",-1,2),new r("ais",-1,3),new r("erais",15,2),new r("ions",-1,1),new r("erions",17,2),new r("assions",17,3),new r("erons",-1,2),new r("ants",-1,3),new r("és",-1,2),new r("ait",-1,3),new r("erait",23,2),new r("ant",-1,3),new r("aIent",-1,3),new r("eraIent",26,2),new r("èrent",-1,2),new r("assent",-1,3),new r("eront",-1,2),new r("ât",-1,3),new r("ez",-1,2),new r("iez",32,2),new r("eriez",33,2),new r("assiez",33,3),new r("erez",32,2),new 
r("é",-1,2)],P=[new r("e",-1,3),new r("Ière",0,2),new r("ière",0,2),new r("ion",-1,1),new r("Ier",-1,2),new r("ier",-1,2),new r("ë",-1,4)],U=[new r("ell",-1,-1),new r("eill",-1,-1),new r("enn",-1,-1),new r("onn",-1,-1),new r("ett",-1,-1)],F=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,128,130,103,8,5],S=[1,65,20,0,0,0,0,0,0,0,0,0,0,0,0,0,128],W=new s;this.setCurrent=function(e){W.setCurrent(e)},this.getCurrent=function(){return W.getCurrent()},this.stem=function(){var e=W.cursor;return n(),W.cursor=e,u(),W.limit_backward=e,W.cursor=W.limit,k(),W.cursor=W.limit,b(),W.cursor=W.limit,d(),W.cursor=W.limit_backward,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.fr.stemmer,"stemmer-fr"),e.fr.stopWordFilter=e.generateStopWordFilter("ai aie aient aies ait as au aura aurai auraient aurais aurait auras aurez auriez aurions aurons auront aux avaient avais avait avec avez aviez avions avons ayant ayez ayons c ce ceci celà ces cet cette d dans de des du elle en es est et eu eue eues eurent eus eusse eussent eusses eussiez eussions eut eux eûmes eût eûtes furent fus fusse fussent fusses fussiez fussions fut fûmes fût fûtes ici il ils j je l la le les leur leurs lui m ma mais me mes moi mon même n ne nos notre nous on ont ou par pas pour qu que quel quelle quelles quels qui s sa sans se sera serai seraient serais serait seras serez seriez serions serons seront ses soi soient sois soit sommes son sont soyez soyons suis sur t ta te tes toi ton tu un une vos votre vous y à étaient étais était étant étiez étions été étée étées étés êtes".split(" ")),e.Pipeline.registerFunction(e.fr.stopWordFilter,"stopWordFilter-fr")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.he.min.js b/assets/javascripts/lunr/min/lunr.he.min.js new file mode 100644 index 000000000..b863d3eae --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.he.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.he=function(){this.pipeline.reset(),this.pipeline.add(e.he.trimmer,e.he.stopWordFilter,e.he.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.he.stemmer))},e.he.wordCharacters="֑-״א-תa-zA-Za-zA-Z0-90-9",e.he.trimmer=e.trimmerSupport.generateTrimmer(e.he.wordCharacters),e.Pipeline.registerFunction(e.he.trimmer,"trimmer-he"),e.he.stemmer=function(){var e=this;return e.result=!1,e.preRemoved=!1,e.sufRemoved=!1,e.pre={pre1:"ה ו י ת",pre2:"ב כ ל מ ש כש",pre3:"הב הכ הל המ הש בש לכ",pre4:"וב וכ ול ומ וש",pre5:"מה שה כל",pre6:"מב מכ מל ממ מש",pre7:"בה בו בי בת כה כו כי כת לה לו לי לת",pre8:"ובה ובו ובי ובת וכה וכו וכי וכת ולה ולו ולי ולת"},e.suf={suf1:"ך כ ם ן נ",suf2:"ים ות וך וכ ום ון ונ הם הן יכ יך ינ ים",suf3:"תי תך תכ תם תן תנ",suf4:"ותי ותך ותכ ותם ותן ותנ",suf5:"נו כם כן הם הן",suf6:"ונו וכם וכן והם והן",suf7:"תכם תכן תנו תהם תהן",suf8:"הוא היא הם הן אני אתה את אנו אתם אתן",suf9:"ני נו כי כו כם כן תי תך תכ תם תן",suf10:"י ך כ ם ן נ ת"},e.patterns=JSON.parse('{"hebrewPatterns": [{"pt1": [{"c": "ה", "l": 0}]}, {"pt2": [{"c": "ו", "l": 0}]}, {"pt3": [{"c": "י", "l": 0}]}, {"pt4": [{"c": "ת", "l": 0}]}, {"pt5": [{"c": "מ", "l": 0}]}, {"pt6": [{"c": "ל", "l": 0}]}, {"pt7": [{"c": "ב", "l": 0}]}, {"pt8": [{"c": "כ", "l": 0}]}, {"pt9": [{"c": "ש", "l": 0}]}, {"pt10": [{"c": "כש", "l": 0}]}, {"pt11": [{"c": "בה", "l": 0}]}, {"pt12": [{"c": "וב", "l": 0}]}, {"pt13": [{"c": "וכ", "l": 0}]}, {"pt14": [{"c": "ול", "l": 0}]}, {"pt15": [{"c": "ומ", "l": 0}]}, {"pt16": [{"c": "וש", "l": 0}]}, {"pt17": [{"c": "הב", "l": 0}]}, {"pt18": [{"c": "הכ", "l": 0}]}, {"pt19": [{"c": "הל", "l": 0}]}, {"pt20": [{"c": "המ", "l": 0}]}, {"pt21": [{"c": "הש", "l": 0}]}, {"pt22": [{"c": "מה", "l": 0}]}, {"pt23": [{"c": "שה", "l": 0}]}, {"pt24": [{"c": "כל", "l": 0}]}]}'),e.execArray=["cleanWord","removeDiacritics","removeStopWords","normalizeHebrewCharacters"],e.stem=function(){var r=0;for(e.result=!1,e.preRemoved=!1,e.sufRemoved=!1;r=0)return!0},e.normalizeHebrewCharacters=function(){return e.word=e.word.replace("ך","כ"),e.word=e.word.replace("ם","מ"),e.word=e.word.replace("ן","נ"),e.word=e.word.replace("ף","פ"),e.word=e.word.replace("ץ","צ"),!1},function(r){return"function"==typeof r.update?r.update(function(r){return e.setCurrent(r),e.stem(),e.getCurrent()}):(e.setCurrent(r),e.stem(),e.getCurrent())}}(),e.Pipeline.registerFunction(e.he.stemmer,"stemmer-he"),e.he.stopWordFilter=e.generateStopWordFilter("אבל או אולי אותו אותי אותך אותם אותן אותנו אז אחר אחרות אחרי אחריכן אחרים אחרת אי איזה איך אין איפה אל אלה אלו אם אנחנו אני אף אפשר את אתה אתכם אתכן אתם אתן באיזה באיזו בגלל בין בלבד בעבור בעזרת בכל בכן בלי במידה במקום שבו ברוב בשביל בשעה ש בתוך גם דרך הוא היא היה היי היכן היתה היתי הם הן הנה הסיבה שבגללה הרי ואילו ואת זאת זה זות יהיה יוכל יוכלו יותר מדי יכול יכולה יכולות יכולים יכל יכלה יכלו יש כאן כאשר כולם כולן כזה כי כיצד כך כל כלל כמו כן כפי כש לא לאו לאיזותך לאן לבין לה להיות להם להן לו לזה לזות לי לך לכם לכן למה למעלה למעלה מ למטה למטה מ למעט למקום שבו למרות לנו לעבר לעיכן לפיכך לפני מאד מאחורי מאיזו סיבה מאין מאיפה מבלי מבעד מדוע מה מהיכן מול מחוץ מי מידע מכאן מכל מכן מלבד מן מנין מסוגל מעט מעטים מעל מצד מקום בו מתחת מתי נגד נגר נו עד עז על עלי עליו עליה עליהם עליך עלינו עם עצמה עצמהם עצמהן עצמו עצמי עצמם עצמן עצמנו פה רק שוב של שלה שלהם שלהן שלו שלי שלך שלכה שלכם שלכן שלנו שם תהיה תחת".split(" ")),e.Pipeline.registerFunction(e.he.stopWordFilter,"stopWordFilter-he")}}); \ No newline at end of file diff --git 
a/assets/javascripts/lunr/min/lunr.hi.min.js b/assets/javascripts/lunr/min/lunr.hi.min.js new file mode 100644 index 000000000..7dbc41402 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.hi.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.hi=function(){this.pipeline.reset(),this.pipeline.add(e.hi.trimmer,e.hi.stopWordFilter,e.hi.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.hi.stemmer))},e.hi.wordCharacters="ऀ-ःऄ-एऐ-टठ-यर-िी-ॏॐ-य़ॠ-९॰-ॿa-zA-Za-zA-Z0-90-9",e.hi.trimmer=e.trimmerSupport.generateTrimmer(e.hi.wordCharacters),e.Pipeline.registerFunction(e.hi.trimmer,"trimmer-hi"),e.hi.stopWordFilter=e.generateStopWordFilter("अत अपना अपनी अपने अभी अंदर आदि आप इत्यादि इन इनका इन्हीं इन्हें इन्हों इस इसका इसकी इसके इसमें इसी इसे उन उनका उनकी उनके उनको उन्हीं उन्हें उन्हों उस उसके उसी उसे एक एवं एस ऐसे और कई कर करता करते करना करने करें कहते कहा का काफ़ी कि कितना किन्हें किन्हों किया किर किस किसी किसे की कुछ कुल के को कोई कौन कौनसा गया घर जब जहाँ जा जितना जिन जिन्हें जिन्हों जिस जिसे जीधर जैसा जैसे जो तक तब तरह तिन तिन्हें तिन्हों तिस तिसे तो था थी थे दबारा दिया दुसरा दूसरे दो द्वारा न नके नहीं ना निहायत नीचे ने पर पहले पूरा पे फिर बनी बही बहुत बाद बाला बिलकुल भी भीतर मगर मानो मे में यदि यह यहाँ यही या यिह ये रखें रहा रहे ऱ्वासा लिए लिये लेकिन व वग़ैरह वर्ग वह वहाँ वहीं वाले वुह वे वो सकता सकते सबसे सभी साथ साबुत साभ सारा से सो संग ही हुआ हुई हुए है हैं हो होता होती होते होना होने".split(" ")),e.hi.stemmer=function(){return function(e){return"function"==typeof e.update?e.update(function(e){return e}):e}}();var r=e.wordcut;r.init(),e.hi.tokenizer=function(i){if(!arguments.length||null==i||void 0==i)return[];if(Array.isArray(i))return i.map(function(r){return isLunr2?new e.Token(r.toLowerCase()):r.toLowerCase()});var t=i.toString().toLowerCase().replace(/^\s+/,"");return r.cut(t).split("|")},e.Pipeline.registerFunction(e.hi.stemmer,"stemmer-hi"),e.Pipeline.registerFunction(e.hi.stopWordFilter,"stopWordFilter-hi")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.hu.min.js b/assets/javascripts/lunr/min/lunr.hu.min.js new file mode 100644 index 000000000..ed9d909f7 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.hu.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Hungarian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,n){"function"==typeof define&&define.amd?define(n):"object"==typeof exports?module.exports=n():n()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.hu=function(){this.pipeline.reset(),this.pipeline.add(e.hu.trimmer,e.hu.stopWordFilter,e.hu.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.hu.stemmer))},e.hu.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.hu.trimmer=e.trimmerSupport.generateTrimmer(e.hu.wordCharacters),e.Pipeline.registerFunction(e.hu.trimmer,"trimmer-hu"),e.hu.stemmer=function(){var n=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,i=new function(){function e(){var e,n=L.cursor;if(d=L.limit,L.in_grouping(W,97,252))for(;;){if(e=L.cursor,L.out_grouping(W,97,252))return L.cursor=e,L.find_among(g,8)||(L.cursor=e,e=L.limit)return void(d=e);L.cursor++}if(L.cursor=n,L.out_grouping(W,97,252)){for(;!L.in_grouping(W,97,252);){if(L.cursor>=L.limit)return;L.cursor++}d=L.cursor}}function i(){return d<=L.cursor}function a(){var e;if(L.ket=L.cursor,(e=L.find_among_b(h,2))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("a");break;case 2:L.slice_from("e")}}function t(){var e=L.limit-L.cursor;return!!L.find_among_b(p,23)&&(L.cursor=L.limit-e,!0)}function s(){if(L.cursor>L.limit_backward){L.cursor--,L.ket=L.cursor;var e=L.cursor-1;L.limit_backward<=e&&e<=L.limit&&(L.cursor=e,L.bra=e,L.slice_del())}}function c(){var e;if(L.ket=L.cursor,(e=L.find_among_b(_,2))&&(L.bra=L.cursor,i())){if((1==e||2==e)&&!t())return;L.slice_del(),s()}}function o(){L.ket=L.cursor,L.find_among_b(v,44)&&(L.bra=L.cursor,i()&&(L.slice_del(),a()))}function w(){var e;if(L.ket=L.cursor,(e=L.find_among_b(z,3))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("e");break;case 2:case 3:L.slice_from("a")}}function l(){var e;if(L.ket=L.cursor,(e=L.find_among_b(y,6))&&(L.bra=L.cursor,i()))switch(e){case 1:case 2:L.slice_del();break;case 3:L.slice_from("a");break;case 4:L.slice_from("e")}}function u(){var e;if(L.ket=L.cursor,(e=L.find_among_b(j,2))&&(L.bra=L.cursor,i())){if((1==e||2==e)&&!t())return;L.slice_del(),s()}}function m(){var e;if(L.ket=L.cursor,(e=L.find_among_b(C,7))&&(L.bra=L.cursor,i()))switch(e){case 1:L.slice_from("a");break;case 2:L.slice_from("e");break;case 3:case 4:case 5:case 6:case 7:L.slice_del()}}function k(){var e;if(L.ket=L.cursor,(e=L.find_among_b(P,12))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 7:case 9:L.slice_del();break;case 2:case 5:case 8:L.slice_from("e");break;case 3:case 6:L.slice_from("a")}}function f(){var e;if(L.ket=L.cursor,(e=L.find_among_b(F,31))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 7:case 8:case 9:case 12:case 13:case 16:case 17:case 18:L.slice_del();break;case 2:case 5:case 10:case 14:case 19:L.slice_from("a");break;case 3:case 6:case 11:case 15:case 20:L.slice_from("e")}}function b(){var e;if(L.ket=L.cursor,(e=L.find_among_b(S,42))&&(L.bra=L.cursor,i()))switch(e){case 1:case 4:case 5:case 6:case 9:case 10:case 11:case 14:case 15:case 16:case 17:case 20:case 21:case 24:case 25:case 26:case 29:L.slice_del();break;case 2:case 7:case 12:case 18:case 22:case 27:L.slice_from("a");break;case 3:case 8:case 13:case 19:case 23:case 28:L.slice_from("e")}}var d,g=[new n("cs",-1,-1),new n("dzs",-1,-1),new n("gy",-1,-1),new n("ly",-1,-1),new n("ny",-1,-1),new n("sz",-1,-1),new n("ty",-1,-1),new n("zs",-1,-1)],h=[new n("á",-1,1),new n("é",-1,2)],p=[new n("bb",-1,-1),new n("cc",-1,-1),new n("dd",-1,-1),new n("ff",-1,-1),new n("gg",-1,-1),new n("jj",-1,-1),new n("kk",-1,-1),new n("ll",-1,-1),new n("mm",-1,-1),new n("nn",-1,-1),new n("pp",-1,-1),new 
n("rr",-1,-1),new n("ccs",-1,-1),new n("ss",-1,-1),new n("zzs",-1,-1),new n("tt",-1,-1),new n("vv",-1,-1),new n("ggy",-1,-1),new n("lly",-1,-1),new n("nny",-1,-1),new n("tty",-1,-1),new n("ssz",-1,-1),new n("zz",-1,-1)],_=[new n("al",-1,1),new n("el",-1,2)],v=[new n("ba",-1,-1),new n("ra",-1,-1),new n("be",-1,-1),new n("re",-1,-1),new n("ig",-1,-1),new n("nak",-1,-1),new n("nek",-1,-1),new n("val",-1,-1),new n("vel",-1,-1),new n("ul",-1,-1),new n("nál",-1,-1),new n("nél",-1,-1),new n("ból",-1,-1),new n("ról",-1,-1),new n("tól",-1,-1),new n("bõl",-1,-1),new n("rõl",-1,-1),new n("tõl",-1,-1),new n("ül",-1,-1),new n("n",-1,-1),new n("an",19,-1),new n("ban",20,-1),new n("en",19,-1),new n("ben",22,-1),new n("képpen",22,-1),new n("on",19,-1),new n("ön",19,-1),new n("képp",-1,-1),new n("kor",-1,-1),new n("t",-1,-1),new n("at",29,-1),new n("et",29,-1),new n("ként",29,-1),new n("anként",32,-1),new n("enként",32,-1),new n("onként",32,-1),new n("ot",29,-1),new n("ért",29,-1),new n("öt",29,-1),new n("hez",-1,-1),new n("hoz",-1,-1),new n("höz",-1,-1),new n("vá",-1,-1),new n("vé",-1,-1)],z=[new n("án",-1,2),new n("én",-1,1),new n("ánként",-1,3)],y=[new n("stul",-1,2),new n("astul",0,1),new n("ástul",0,3),new n("stül",-1,2),new n("estül",3,1),new n("éstül",3,4)],j=[new n("á",-1,1),new n("é",-1,2)],C=[new n("k",-1,7),new n("ak",0,4),new n("ek",0,6),new n("ok",0,5),new n("ák",0,1),new n("ék",0,2),new n("ök",0,3)],P=[new n("éi",-1,7),new n("áéi",0,6),new n("ééi",0,5),new n("é",-1,9),new n("ké",3,4),new n("aké",4,1),new n("eké",4,1),new n("oké",4,1),new n("áké",4,3),new n("éké",4,2),new n("öké",4,1),new n("éé",3,8)],F=[new n("a",-1,18),new n("ja",0,17),new n("d",-1,16),new n("ad",2,13),new n("ed",2,13),new n("od",2,13),new n("ád",2,14),new n("éd",2,15),new n("öd",2,13),new n("e",-1,18),new n("je",9,17),new n("nk",-1,4),new n("unk",11,1),new n("ánk",11,2),new n("énk",11,3),new n("ünk",11,1),new n("uk",-1,8),new n("juk",16,7),new n("ájuk",17,5),new n("ük",-1,8),new n("jük",19,7),new n("éjük",20,6),new n("m",-1,12),new n("am",22,9),new n("em",22,9),new n("om",22,9),new n("ám",22,10),new n("ém",22,11),new n("o",-1,18),new n("á",-1,19),new n("é",-1,20)],S=[new n("id",-1,10),new n("aid",0,9),new n("jaid",1,6),new n("eid",0,9),new n("jeid",3,6),new n("áid",0,7),new n("éid",0,8),new n("i",-1,15),new n("ai",7,14),new n("jai",8,11),new n("ei",7,14),new n("jei",10,11),new n("ái",7,12),new n("éi",7,13),new n("itek",-1,24),new n("eitek",14,21),new n("jeitek",15,20),new n("éitek",14,23),new n("ik",-1,29),new n("aik",18,26),new n("jaik",19,25),new n("eik",18,26),new n("jeik",21,25),new n("áik",18,27),new n("éik",18,28),new n("ink",-1,20),new n("aink",25,17),new n("jaink",26,16),new n("eink",25,17),new n("jeink",28,16),new n("áink",25,18),new n("éink",25,19),new n("aitok",-1,21),new n("jaitok",32,20),new n("áitok",-1,22),new n("im",-1,5),new n("aim",35,4),new n("jaim",36,1),new n("eim",35,4),new n("jeim",38,1),new n("áim",35,2),new n("éim",35,3)],W=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,52,14],L=new r;this.setCurrent=function(e){L.setCurrent(e)},this.getCurrent=function(){return L.getCurrent()},this.stem=function(){var n=L.cursor;return e(),L.limit_backward=n,L.cursor=L.limit,c(),L.cursor=L.limit,o(),L.cursor=L.limit,w(),L.cursor=L.limit,l(),L.cursor=L.limit,u(),L.cursor=L.limit,k(),L.cursor=L.limit,f(),L.cursor=L.limit,b(),L.cursor=L.limit,m(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.hu.stemmer,"stemmer-hu"),e.hu.stopWordFilter=e.generateStopWordFilter("a abban ahhoz ahogy ahol aki akik akkor alatt amely amelyek amelyekben amelyeket amelyet amelynek ami amikor amit amolyan amíg annak arra arról az azok azon azonban azt aztán azután azzal azért be belül benne bár cikk cikkek cikkeket csak de e ebben eddig egy egyes egyetlen egyik egyre egyéb egész ehhez ekkor el ellen elsõ elég elõ elõször elõtt emilyen ennek erre ez ezek ezen ezt ezzel ezért fel felé hanem hiszen hogy hogyan igen ill ill. illetve ilyen ilyenkor ismét ison itt jobban jó jól kell kellett keressünk keresztül ki kívül között közül legalább legyen lehet lehetett lenne lenni lesz lett maga magát majd majd meg mellett mely melyek mert mi mikor milyen minden mindenki mindent mindig mint mintha mit mivel miért most már más másik még míg nagy nagyobb nagyon ne nekem neki nem nincs néha néhány nélkül olyan ott pedig persze rá s saját sem semmi sok sokat sokkal szemben szerint szinte számára talán tehát teljes tovább továbbá több ugyanis utolsó után utána vagy vagyis vagyok valaki valami valamint való van vannak vele vissza viszont volna volt voltak voltam voltunk által általában át én éppen és így õ õk õket össze úgy új újabb újra".split(" ")),e.Pipeline.registerFunction(e.hu.stopWordFilter,"stopWordFilter-hu")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.hy.min.js b/assets/javascripts/lunr/min/lunr.hy.min.js new file mode 100644 index 000000000..b37f79298 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.hy.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.hy=function(){this.pipeline.reset(),this.pipeline.add(e.hy.trimmer,e.hy.stopWordFilter)},e.hy.wordCharacters="[A-Za-z԰-֏ff-ﭏ]",e.hy.trimmer=e.trimmerSupport.generateTrimmer(e.hy.wordCharacters),e.Pipeline.registerFunction(e.hy.trimmer,"trimmer-hy"),e.hy.stopWordFilter=e.generateStopWordFilter("դու և եք էիր էիք հետո նաև նրանք որը վրա է որ պիտի են այս մեջ ն իր ու ի այդ որոնք այն կամ էր մի ես համար այլ իսկ էին ենք հետ ին թ էինք մենք նրա նա դուք եմ էի ըստ որպես ում".split(" ")),e.Pipeline.registerFunction(e.hy.stopWordFilter,"stopWordFilter-hy"),e.hy.stemmer=function(){return function(e){return"function"==typeof e.update?e.update(function(e){return e}):e}}(),e.Pipeline.registerFunction(e.hy.stemmer,"stemmer-hy")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.it.min.js b/assets/javascripts/lunr/min/lunr.it.min.js new file mode 100644 index 000000000..344b6a3c0 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.it.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Italian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! 
+ * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.it=function(){this.pipeline.reset(),this.pipeline.add(e.it.trimmer,e.it.stopWordFilter,e.it.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.it.stemmer))},e.it.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.it.trimmer=e.trimmerSupport.generateTrimmer(e.it.wordCharacters),e.Pipeline.registerFunction(e.it.trimmer,"trimmer-it"),e.it.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(e,r,n){return!(!x.eq_s(1,e)||(x.ket=x.cursor,!x.in_grouping(L,97,249)))&&(x.slice_from(r),x.cursor=n,!0)}function i(){for(var r,n,i,o,t=x.cursor;;){if(x.bra=x.cursor,r=x.find_among(h,7))switch(x.ket=x.cursor,r){case 1:x.slice_from("à");continue;case 2:x.slice_from("è");continue;case 3:x.slice_from("ì");continue;case 4:x.slice_from("ò");continue;case 5:x.slice_from("ù");continue;case 6:x.slice_from("qU");continue;case 7:if(x.cursor>=x.limit)break;x.cursor++;continue}break}for(x.cursor=t;;)for(n=x.cursor;;){if(i=x.cursor,x.in_grouping(L,97,249)){if(x.bra=x.cursor,o=x.cursor,e("u","U",i))break;if(x.cursor=o,e("i","I",i))break}if(x.cursor=i,x.cursor>=x.limit)return void(x.cursor=n);x.cursor++}}function o(e){if(x.cursor=e,!x.in_grouping(L,97,249))return!1;for(;!x.out_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}return!0}function t(){if(x.in_grouping(L,97,249)){var e=x.cursor;if(x.out_grouping(L,97,249)){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return o(e);x.cursor++}return!0}return o(e)}return!1}function s(){var e,r=x.cursor;if(!t()){if(x.cursor=r,!x.out_grouping(L,97,249))return;if(e=x.cursor,x.out_grouping(L,97,249)){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return x.cursor=e,void(x.in_grouping(L,97,249)&&x.cursor=x.limit)return;x.cursor++}k=x.cursor}function a(){for(;!x.in_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}for(;!x.out_grouping(L,97,249);){if(x.cursor>=x.limit)return!1;x.cursor++}return!0}function u(){var e=x.cursor;k=x.limit,p=k,g=k,s(),x.cursor=e,a()&&(p=x.cursor,a()&&(g=x.cursor))}function c(){for(var e;;){if(x.bra=x.cursor,!(e=x.find_among(q,3)))break;switch(x.ket=x.cursor,e){case 1:x.slice_from("i");break;case 2:x.slice_from("u");break;case 3:if(x.cursor>=x.limit)return;x.cursor++}}}function w(){return k<=x.cursor}function l(){return p<=x.cursor}function m(){return g<=x.cursor}function f(){var e;if(x.ket=x.cursor,x.find_among_b(C,37)&&(x.bra=x.cursor,(e=x.find_among_b(z,5))&&w()))switch(e){case 1:x.slice_del();break;case 2:x.slice_from("e")}}function v(){var e;if(x.ket=x.cursor,!(e=x.find_among_b(S,51)))return!1;switch(x.bra=x.cursor,e){case 1:if(!m())return!1;x.slice_del();break;case 2:if(!m())return!1;x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"ic")&&(x.bra=x.cursor,m()&&x.slice_del());break;case 3:if(!m())return!1;x.slice_from("log");break;case 4:if(!m())return!1;x.slice_from("u");break;case 
5:if(!m())return!1;x.slice_from("ente");break;case 6:if(!w())return!1;x.slice_del();break;case 7:if(!l())return!1;x.slice_del(),x.ket=x.cursor,e=x.find_among_b(P,4),e&&(x.bra=x.cursor,m()&&(x.slice_del(),1==e&&(x.ket=x.cursor,x.eq_s_b(2,"at")&&(x.bra=x.cursor,m()&&x.slice_del()))));break;case 8:if(!m())return!1;x.slice_del(),x.ket=x.cursor,e=x.find_among_b(F,3),e&&(x.bra=x.cursor,1==e&&m()&&x.slice_del());break;case 9:if(!m())return!1;x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"at")&&(x.bra=x.cursor,m()&&(x.slice_del(),x.ket=x.cursor,x.eq_s_b(2,"ic")&&(x.bra=x.cursor,m()&&x.slice_del())))}return!0}function b(){var e,r;x.cursor>=k&&(r=x.limit_backward,x.limit_backward=k,x.ket=x.cursor,e=x.find_among_b(W,87),e&&(x.bra=x.cursor,1==e&&x.slice_del()),x.limit_backward=r)}function d(){var e=x.limit-x.cursor;if(x.ket=x.cursor,x.in_grouping_b(y,97,242)&&(x.bra=x.cursor,w()&&(x.slice_del(),x.ket=x.cursor,x.eq_s_b(1,"i")&&(x.bra=x.cursor,w()))))return void x.slice_del();x.cursor=x.limit-e}function _(){d(),x.ket=x.cursor,x.eq_s_b(1,"h")&&(x.bra=x.cursor,x.in_grouping_b(U,99,103)&&w()&&x.slice_del())}var g,p,k,h=[new r("",-1,7),new r("qu",0,6),new r("á",0,1),new r("é",0,2),new r("í",0,3),new r("ó",0,4),new r("ú",0,5)],q=[new r("",-1,3),new r("I",0,1),new r("U",0,2)],C=[new r("la",-1,-1),new r("cela",0,-1),new r("gliela",0,-1),new r("mela",0,-1),new r("tela",0,-1),new r("vela",0,-1),new r("le",-1,-1),new r("cele",6,-1),new r("gliele",6,-1),new r("mele",6,-1),new r("tele",6,-1),new r("vele",6,-1),new r("ne",-1,-1),new r("cene",12,-1),new r("gliene",12,-1),new r("mene",12,-1),new r("sene",12,-1),new r("tene",12,-1),new r("vene",12,-1),new r("ci",-1,-1),new r("li",-1,-1),new r("celi",20,-1),new r("glieli",20,-1),new r("meli",20,-1),new r("teli",20,-1),new r("veli",20,-1),new r("gli",20,-1),new r("mi",-1,-1),new r("si",-1,-1),new r("ti",-1,-1),new r("vi",-1,-1),new r("lo",-1,-1),new r("celo",31,-1),new r("glielo",31,-1),new r("melo",31,-1),new r("telo",31,-1),new r("velo",31,-1)],z=[new r("ando",-1,1),new r("endo",-1,1),new r("ar",-1,2),new r("er",-1,2),new r("ir",-1,2)],P=[new r("ic",-1,-1),new r("abil",-1,-1),new r("os",-1,-1),new r("iv",-1,1)],F=[new r("ic",-1,1),new r("abil",-1,1),new r("iv",-1,1)],S=[new r("ica",-1,1),new r("logia",-1,3),new r("osa",-1,1),new r("ista",-1,1),new r("iva",-1,9),new r("anza",-1,1),new r("enza",-1,5),new r("ice",-1,1),new r("atrice",7,1),new r("iche",-1,1),new r("logie",-1,3),new r("abile",-1,1),new r("ibile",-1,1),new r("usione",-1,4),new r("azione",-1,2),new r("uzione",-1,4),new r("atore",-1,2),new r("ose",-1,1),new r("ante",-1,1),new r("mente",-1,1),new r("amente",19,7),new r("iste",-1,1),new r("ive",-1,9),new r("anze",-1,1),new r("enze",-1,5),new r("ici",-1,1),new r("atrici",25,1),new r("ichi",-1,1),new r("abili",-1,1),new r("ibili",-1,1),new r("ismi",-1,1),new r("usioni",-1,4),new r("azioni",-1,2),new r("uzioni",-1,4),new r("atori",-1,2),new r("osi",-1,1),new r("anti",-1,1),new r("amenti",-1,6),new r("imenti",-1,6),new r("isti",-1,1),new r("ivi",-1,9),new r("ico",-1,1),new r("ismo",-1,1),new r("oso",-1,1),new r("amento",-1,6),new r("imento",-1,6),new r("ivo",-1,9),new r("ità",-1,8),new r("istà",-1,1),new r("istè",-1,1),new r("istì",-1,1)],W=[new r("isca",-1,1),new r("enda",-1,1),new r("ata",-1,1),new r("ita",-1,1),new r("uta",-1,1),new r("ava",-1,1),new r("eva",-1,1),new r("iva",-1,1),new r("erebbe",-1,1),new r("irebbe",-1,1),new r("isce",-1,1),new r("ende",-1,1),new r("are",-1,1),new r("ere",-1,1),new r("ire",-1,1),new r("asse",-1,1),new r("ate",-1,1),new 
r("avate",16,1),new r("evate",16,1),new r("ivate",16,1),new r("ete",-1,1),new r("erete",20,1),new r("irete",20,1),new r("ite",-1,1),new r("ereste",-1,1),new r("ireste",-1,1),new r("ute",-1,1),new r("erai",-1,1),new r("irai",-1,1),new r("isci",-1,1),new r("endi",-1,1),new r("erei",-1,1),new r("irei",-1,1),new r("assi",-1,1),new r("ati",-1,1),new r("iti",-1,1),new r("eresti",-1,1),new r("iresti",-1,1),new r("uti",-1,1),new r("avi",-1,1),new r("evi",-1,1),new r("ivi",-1,1),new r("isco",-1,1),new r("ando",-1,1),new r("endo",-1,1),new r("Yamo",-1,1),new r("iamo",-1,1),new r("avamo",-1,1),new r("evamo",-1,1),new r("ivamo",-1,1),new r("eremo",-1,1),new r("iremo",-1,1),new r("assimo",-1,1),new r("ammo",-1,1),new r("emmo",-1,1),new r("eremmo",54,1),new r("iremmo",54,1),new r("immo",-1,1),new r("ano",-1,1),new r("iscano",58,1),new r("avano",58,1),new r("evano",58,1),new r("ivano",58,1),new r("eranno",-1,1),new r("iranno",-1,1),new r("ono",-1,1),new r("iscono",65,1),new r("arono",65,1),new r("erono",65,1),new r("irono",65,1),new r("erebbero",-1,1),new r("irebbero",-1,1),new r("assero",-1,1),new r("essero",-1,1),new r("issero",-1,1),new r("ato",-1,1),new r("ito",-1,1),new r("uto",-1,1),new r("avo",-1,1),new r("evo",-1,1),new r("ivo",-1,1),new r("ar",-1,1),new r("ir",-1,1),new r("erà",-1,1),new r("irà",-1,1),new r("erò",-1,1),new r("irò",-1,1)],L=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2,1],y=[17,65,0,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2],U=[17],x=new n;this.setCurrent=function(e){x.setCurrent(e)},this.getCurrent=function(){return x.getCurrent()},this.stem=function(){var e=x.cursor;return i(),x.cursor=e,u(),x.limit_backward=e,x.cursor=x.limit,f(),x.cursor=x.limit,v()||(x.cursor=x.limit,b()),x.cursor=x.limit,_(),x.cursor=x.limit_backward,c(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.it.stemmer,"stemmer-it"),e.it.stopWordFilter=e.generateStopWordFilter("a abbia abbiamo abbiano abbiate ad agl agli ai al all alla alle allo anche avemmo avendo avesse avessero avessi avessimo aveste avesti avete aveva avevamo avevano avevate avevi avevo avrai avranno avrebbe avrebbero avrei avremmo avremo avreste avresti avrete avrà avrò avuta avute avuti avuto c che chi ci coi col come con contro cui da dagl dagli dai dal dall dalla dalle dallo degl degli dei del dell della delle dello di dov dove e ebbe ebbero ebbi ed era erano eravamo eravate eri ero essendo faccia facciamo facciano facciate faccio facemmo facendo facesse facessero facessi facessimo faceste facesti faceva facevamo facevano facevate facevi facevo fai fanno farai faranno farebbe farebbero farei faremmo faremo fareste faresti farete farà farò fece fecero feci fosse fossero fossi fossimo foste fosti fu fui fummo furono gli ha hai hanno ho i il in io l la le lei li lo loro lui ma mi mia mie miei mio ne negl negli nei nel nell nella nelle nello noi non nostra nostre nostri nostro o per perché più quale quanta quante quanti quanto quella quelle quelli quello questa queste questi questo sarai saranno sarebbe sarebbero sarei saremmo saremo sareste saresti sarete sarà sarò se sei si sia siamo siano siate siete sono sta stai stando stanno starai staranno starebbe starebbero starei staremmo staremo stareste staresti starete starà starò stava stavamo stavano stavate stavi stavo stemmo stesse stessero stessi stessimo steste stesti stette stettero stetti stia stiamo stiano stiate sto su sua sue sugl sugli 
sui sul sull sulla sulle sullo suo suoi ti tra tu tua tue tuo tuoi tutti tutto un una uno vi voi vostra vostre vostri vostro è".split(" ")),e.Pipeline.registerFunction(e.it.stopWordFilter,"stopWordFilter-it")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.ja.min.js b/assets/javascripts/lunr/min/lunr.ja.min.js new file mode 100644 index 000000000..5f254ebe9 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.ja.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.ja=function(){this.pipeline.reset(),this.pipeline.add(e.ja.trimmer,e.ja.stopWordFilter,e.ja.stemmer),r?this.tokenizer=e.ja.tokenizer:(e.tokenizer&&(e.tokenizer=e.ja.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.ja.tokenizer))};var t=new e.TinySegmenter;e.ja.tokenizer=function(i){var n,o,s,p,a,u,m,l,c,f;if(!arguments.length||null==i||void 0==i)return[];if(Array.isArray(i))return i.map(function(t){return r?new e.Token(t.toLowerCase()):t.toLowerCase()});for(o=i.toString().toLowerCase().replace(/^\s+/,""),n=o.length-1;n>=0;n--)if(/\S/.test(o.charAt(n))){o=o.substring(0,n+1);break}for(a=[],s=o.length,c=0,l=0;c<=s;c++)if(u=o.charAt(c),m=c-l,u.match(/\s/)||c==s){if(m>0)for(p=t.segment(o.slice(l,c)).filter(function(e){return!!e}),f=l,n=0;n=C.limit)break;C.cursor++;continue}break}for(C.cursor=o,C.bra=o,C.eq_s(1,"y")?(C.ket=C.cursor,C.slice_from("Y")):C.cursor=o;;)if(e=C.cursor,C.in_grouping(q,97,232)){if(i=C.cursor,C.bra=i,C.eq_s(1,"i"))C.ket=C.cursor,C.in_grouping(q,97,232)&&(C.slice_from("I"),C.cursor=e);else if(C.cursor=i,C.eq_s(1,"y"))C.ket=C.cursor,C.slice_from("Y"),C.cursor=e;else if(n(e))break}else if(n(e))break}function n(r){return C.cursor=r,r>=C.limit||(C.cursor++,!1)}function o(){_=C.limit,d=_,t()||(_=C.cursor,_<3&&(_=3),t()||(d=C.cursor))}function t(){for(;!C.in_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}for(;!C.out_grouping(q,97,232);){if(C.cursor>=C.limit)return!0;C.cursor++}return!1}function s(){for(var r;;)if(C.bra=C.cursor,r=C.find_among(p,3))switch(C.ket=C.cursor,r){case 1:C.slice_from("y");break;case 2:C.slice_from("i");break;case 3:if(C.cursor>=C.limit)return;C.cursor++}}function u(){return _<=C.cursor}function c(){return d<=C.cursor}function a(){var r=C.limit-C.cursor;C.find_among_b(g,3)&&(C.cursor=C.limit-r,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del()))}function l(){var r;w=!1,C.ket=C.cursor,C.eq_s_b(1,"e")&&(C.bra=C.cursor,u()&&(r=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-r,C.slice_del(),w=!0,a())))}function m(){var r;u()&&(r=C.limit-C.cursor,C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-r,C.eq_s_b(3,"gem")||(C.cursor=C.limit-r,C.slice_del(),a())))}function f(){var r,e,i,n,o,t,s=C.limit-C.cursor;if(C.ket=C.cursor,r=C.find_among_b(h,5))switch(C.bra=C.cursor,r){case 1:u()&&C.slice_from("heid");break;case 2:m();break;case 
3:u()&&C.out_grouping_b(j,97,232)&&C.slice_del()}if(C.cursor=C.limit-s,l(),C.cursor=C.limit-s,C.ket=C.cursor,C.eq_s_b(4,"heid")&&(C.bra=C.cursor,c()&&(e=C.limit-C.cursor,C.eq_s_b(1,"c")||(C.cursor=C.limit-e,C.slice_del(),C.ket=C.cursor,C.eq_s_b(2,"en")&&(C.bra=C.cursor,m())))),C.cursor=C.limit-s,C.ket=C.cursor,r=C.find_among_b(k,6))switch(C.bra=C.cursor,r){case 1:if(c()){if(C.slice_del(),i=C.limit-C.cursor,C.ket=C.cursor,C.eq_s_b(2,"ig")&&(C.bra=C.cursor,c()&&(n=C.limit-C.cursor,!C.eq_s_b(1,"e")))){C.cursor=C.limit-n,C.slice_del();break}C.cursor=C.limit-i,a()}break;case 2:c()&&(o=C.limit-C.cursor,C.eq_s_b(1,"e")||(C.cursor=C.limit-o,C.slice_del()));break;case 3:c()&&(C.slice_del(),l());break;case 4:c()&&C.slice_del();break;case 5:c()&&w&&C.slice_del()}C.cursor=C.limit-s,C.out_grouping_b(z,73,232)&&(t=C.limit-C.cursor,C.find_among_b(v,4)&&C.out_grouping_b(q,97,232)&&(C.cursor=C.limit-t,C.ket=C.cursor,C.cursor>C.limit_backward&&(C.cursor--,C.bra=C.cursor,C.slice_del())))}var d,_,w,b=[new e("",-1,6),new e("á",0,1),new e("ä",0,1),new e("é",0,2),new e("ë",0,2),new e("í",0,3),new e("ï",0,3),new e("ó",0,4),new e("ö",0,4),new e("ú",0,5),new e("ü",0,5)],p=[new e("",-1,3),new e("I",0,2),new e("Y",0,1)],g=[new e("dd",-1,-1),new e("kk",-1,-1),new e("tt",-1,-1)],h=[new e("ene",-1,2),new e("se",-1,3),new e("en",-1,2),new e("heden",2,1),new e("s",-1,3)],k=[new e("end",-1,1),new e("ig",-1,2),new e("ing",-1,1),new e("lijk",-1,3),new e("baar",-1,4),new e("bar",-1,5)],v=[new e("aa",-1,-1),new e("ee",-1,-1),new e("oo",-1,-1),new e("uu",-1,-1)],q=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],z=[1,0,0,17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],j=[17,67,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128],C=new i;this.setCurrent=function(r){C.setCurrent(r)},this.getCurrent=function(){return C.getCurrent()},this.stem=function(){var e=C.cursor;return r(),C.cursor=e,o(),C.limit_backward=e,C.cursor=C.limit,f(),C.cursor=C.limit_backward,s(),!0}};return function(r){return"function"==typeof r.update?r.update(function(r){return n.setCurrent(r),n.stem(),n.getCurrent()}):(n.setCurrent(r),n.stem(),n.getCurrent())}}(),r.Pipeline.registerFunction(r.nl.stemmer,"stemmer-nl"),r.nl.stopWordFilter=r.generateStopWordFilter(" aan al alles als altijd andere ben bij daar dan dat de der deze die dit doch doen door dus een eens en er ge geen geweest haar had heb hebben heeft hem het hier hij hoe hun iemand iets ik in is ja je kan kon kunnen maar me meer men met mij mijn moet na naar niet niets nog nu of om omdat onder ons ook op over reeds te tegen toch toen tot u uit uw van veel voor want waren was wat werd wezen wie wil worden wordt zal ze zelf zich zij zijn zo zonder zou".split(" ")),r.Pipeline.registerFunction(r.nl.stopWordFilter,"stopWordFilter-nl")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.no.min.js b/assets/javascripts/lunr/min/lunr.no.min.js new file mode 100644 index 000000000..92bc7e4e8 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.no.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Norwegian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! 
+ * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.no=function(){this.pipeline.reset(),this.pipeline.add(e.no.trimmer,e.no.stopWordFilter,e.no.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.no.stemmer))},e.no.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.no.trimmer=e.trimmerSupport.generateTrimmer(e.no.wordCharacters),e.Pipeline.registerFunction(e.no.trimmer,"trimmer-no"),e.no.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,i=new function(){function e(){var e,r=w.cursor+3;if(a=w.limit,0<=r||r<=w.limit){for(s=r;;){if(e=w.cursor,w.in_grouping(d,97,248)){w.cursor=e;break}if(e>=w.limit)return;w.cursor=e+1}for(;!w.out_grouping(d,97,248);){if(w.cursor>=w.limit)return;w.cursor++}a=w.cursor,a=a&&(r=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,e=w.find_among_b(m,29),w.limit_backward=r,e))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:n=w.limit-w.cursor,w.in_grouping_b(c,98,122)?w.slice_del():(w.cursor=w.limit-n,w.eq_s_b(1,"k")&&w.out_grouping_b(d,97,248)&&w.slice_del());break;case 3:w.slice_from("er")}}function t(){var e,r=w.limit-w.cursor;w.cursor>=a&&(e=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,w.find_among_b(u,2)?(w.bra=w.cursor,w.limit_backward=e,w.cursor=w.limit-r,w.cursor>w.limit_backward&&(w.cursor--,w.bra=w.cursor,w.slice_del())):w.limit_backward=e)}function o(){var e,r;w.cursor>=a&&(r=w.limit_backward,w.limit_backward=a,w.ket=w.cursor,e=w.find_among_b(l,11),e?(w.bra=w.cursor,w.limit_backward=r,1==e&&w.slice_del()):w.limit_backward=r)}var s,a,m=[new r("a",-1,1),new r("e",-1,1),new r("ede",1,1),new r("ande",1,1),new r("ende",1,1),new r("ane",1,1),new r("ene",1,1),new r("hetene",6,1),new r("erte",1,3),new r("en",-1,1),new r("heten",9,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",12,1),new r("s",-1,2),new r("as",14,1),new r("es",14,1),new r("edes",16,1),new r("endes",16,1),new r("enes",16,1),new r("hetenes",19,1),new r("ens",14,1),new r("hetens",21,1),new r("ers",14,1),new r("ets",14,1),new r("et",-1,1),new r("het",25,1),new r("ert",-1,3),new r("ast",-1,1)],u=[new r("dt",-1,-1),new r("vt",-1,-1)],l=[new r("leg",-1,1),new r("eleg",0,1),new r("ig",-1,1),new r("eig",2,1),new r("lig",2,1),new r("elig",4,1),new r("els",-1,1),new r("lov",-1,1),new r("elov",7,1),new r("slov",7,1),new r("hetslov",9,1)],d=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128],c=[119,125,149,1],w=new n;this.setCurrent=function(e){w.setCurrent(e)},this.getCurrent=function(){return w.getCurrent()},this.stem=function(){var r=w.cursor;return e(),w.limit_backward=r,w.cursor=w.limit,i(),w.cursor=w.limit,t(),w.cursor=w.limit,o(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return i.setCurrent(e),i.stem(),i.getCurrent()}):(i.setCurrent(e),i.stem(),i.getCurrent())}}(),e.Pipeline.registerFunction(e.no.stemmer,"stemmer-no"),e.no.stopWordFilter=e.generateStopWordFilter("alle at av bare begge ble blei bli 
blir blitt både båe da de deg dei deim deira deires dem den denne der dere deres det dette di din disse ditt du dykk dykkar då eg ein eit eitt eller elles en enn er et ett etter for fordi fra før ha hadde han hans har hennar henne hennes her hjå ho hoe honom hoss hossen hun hva hvem hver hvilke hvilken hvis hvor hvordan hvorfor i ikke ikkje ikkje ingen ingi inkje inn inni ja jeg kan kom korleis korso kun kunne kva kvar kvarhelst kven kvi kvifor man mange me med medan meg meget mellom men mi min mine mitt mot mykje ned no noe noen noka noko nokon nokor nokre nå når og også om opp oss over på samme seg selv si si sia sidan siden sin sine sitt sjøl skal skulle slik so som som somme somt så sånn til um upp ut uten var vart varte ved vere verte vi vil ville vore vors vort vår være være vært å".split(" ")),e.Pipeline.registerFunction(e.no.stopWordFilter,"stopWordFilter-no")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.pt.min.js b/assets/javascripts/lunr/min/lunr.pt.min.js new file mode 100644 index 000000000..6c16996d6 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.pt.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Portuguese` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.pt=function(){this.pipeline.reset(),this.pipeline.add(e.pt.trimmer,e.pt.stopWordFilter,e.pt.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.pt.stemmer))},e.pt.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.pt.trimmer=e.trimmerSupport.generateTrimmer(e.pt.wordCharacters),e.Pipeline.registerFunction(e.pt.trimmer,"trimmer-pt"),e.pt.stemmer=function(){var r=e.stemmerSupport.Among,s=e.stemmerSupport.SnowballProgram,n=new function(){function e(){for(var e;;){if(z.bra=z.cursor,e=z.find_among(k,3))switch(z.ket=z.cursor,e){case 1:z.slice_from("a~");continue;case 2:z.slice_from("o~");continue;case 3:if(z.cursor>=z.limit)break;z.cursor++;continue}break}}function n(){if(z.out_grouping(y,97,250)){for(;!z.in_grouping(y,97,250);){if(z.cursor>=z.limit)return!0;z.cursor++}return!1}return!0}function i(){if(z.in_grouping(y,97,250))for(;!z.out_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}return g=z.cursor,!0}function o(){var e,r,s=z.cursor;if(z.in_grouping(y,97,250))if(e=z.cursor,n()){if(z.cursor=e,i())return}else g=z.cursor;if(z.cursor=s,z.out_grouping(y,97,250)){if(r=z.cursor,n()){if(z.cursor=r,!z.in_grouping(y,97,250)||z.cursor>=z.limit)return;z.cursor++}g=z.cursor}}function t(){for(;!z.in_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}for(;!z.out_grouping(y,97,250);){if(z.cursor>=z.limit)return!1;z.cursor++}return!0}function a(){var e=z.cursor;g=z.limit,b=g,h=g,o(),z.cursor=e,t()&&(b=z.cursor,t()&&(h=z.cursor))}function u(){for(var e;;){if(z.bra=z.cursor,e=z.find_among(q,3))switch(z.ket=z.cursor,e){case 1:z.slice_from("ã");continue;case 2:z.slice_from("õ");continue;case 3:if(z.cursor>=z.limit)break;z.cursor++;continue}break}}function w(){return g<=z.cursor}function m(){return b<=z.cursor}function c(){return h<=z.cursor}function l(){var e;if(z.ket=z.cursor,!(e=z.find_among_b(F,45)))return!1;switch(z.bra=z.cursor,e){case 1:if(!c())return!1;z.slice_del();break;case 2:if(!c())return!1;z.slice_from("log");break;case 3:if(!c())return!1;z.slice_from("u");break;case 4:if(!c())return!1;z.slice_from("ente");break;case 5:if(!m())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(j,4),e&&(z.bra=z.cursor,c()&&(z.slice_del(),1==e&&(z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,c()&&z.slice_del()))));break;case 6:if(!c())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(C,3),e&&(z.bra=z.cursor,1==e&&c()&&z.slice_del());break;case 7:if(!c())return!1;z.slice_del(),z.ket=z.cursor,e=z.find_among_b(P,3),e&&(z.bra=z.cursor,1==e&&c()&&z.slice_del());break;case 8:if(!c())return!1;z.slice_del(),z.ket=z.cursor,z.eq_s_b(2,"at")&&(z.bra=z.cursor,c()&&z.slice_del());break;case 9:if(!w()||!z.eq_s_b(1,"e"))return!1;z.slice_from("ir")}return!0}function f(){var e,r;if(z.cursor>=g){if(r=z.limit_backward,z.limit_backward=g,z.ket=z.cursor,e=z.find_among_b(S,120))return z.bra=z.cursor,1==e&&z.slice_del(),z.limit_backward=r,!0;z.limit_backward=r}return!1}function d(){var e;z.ket=z.cursor,(e=z.find_among_b(W,7))&&(z.bra=z.cursor,1==e&&w()&&z.slice_del())}function v(e,r){if(z.eq_s_b(1,e)){z.bra=z.cursor;var s=z.limit-z.cursor;if(z.eq_s_b(1,r))return z.cursor=z.limit-s,w()&&z.slice_del(),!1}return!0}function p(){var e;if(z.ket=z.cursor,e=z.find_among_b(L,4))switch(z.bra=z.cursor,e){case 1:w()&&(z.slice_del(),z.ket=z.cursor,z.limit-z.cursor,v("u","g")&&v("i","c"));break;case 2:z.slice_from("c")}}function 
_(){if(!l()&&(z.cursor=z.limit,!f()))return z.cursor=z.limit,void d();z.cursor=z.limit,z.ket=z.cursor,z.eq_s_b(1,"i")&&(z.bra=z.cursor,z.eq_s_b(1,"c")&&(z.cursor=z.limit,w()&&z.slice_del()))}var h,b,g,k=[new r("",-1,3),new r("ã",0,1),new r("õ",0,2)],q=[new r("",-1,3),new r("a~",0,1),new r("o~",0,2)],j=[new r("ic",-1,-1),new r("ad",-1,-1),new r("os",-1,-1),new r("iv",-1,1)],C=[new r("ante",-1,1),new r("avel",-1,1),new r("ível",-1,1)],P=[new r("ic",-1,1),new r("abil",-1,1),new r("iv",-1,1)],F=[new r("ica",-1,1),new r("ância",-1,1),new r("ência",-1,4),new r("ira",-1,9),new r("adora",-1,1),new r("osa",-1,1),new r("ista",-1,1),new r("iva",-1,8),new r("eza",-1,1),new r("logía",-1,2),new r("idade",-1,7),new r("ante",-1,1),new r("mente",-1,6),new r("amente",12,5),new r("ável",-1,1),new r("ível",-1,1),new r("ución",-1,3),new r("ico",-1,1),new r("ismo",-1,1),new r("oso",-1,1),new r("amento",-1,1),new r("imento",-1,1),new r("ivo",-1,8),new r("aça~o",-1,1),new r("ador",-1,1),new r("icas",-1,1),new r("ências",-1,4),new r("iras",-1,9),new r("adoras",-1,1),new r("osas",-1,1),new r("istas",-1,1),new r("ivas",-1,8),new r("ezas",-1,1),new r("logías",-1,2),new r("idades",-1,7),new r("uciones",-1,3),new r("adores",-1,1),new r("antes",-1,1),new r("aço~es",-1,1),new r("icos",-1,1),new r("ismos",-1,1),new r("osos",-1,1),new r("amentos",-1,1),new r("imentos",-1,1),new r("ivos",-1,8)],S=[new r("ada",-1,1),new r("ida",-1,1),new r("ia",-1,1),new r("aria",2,1),new r("eria",2,1),new r("iria",2,1),new r("ara",-1,1),new r("era",-1,1),new r("ira",-1,1),new r("ava",-1,1),new r("asse",-1,1),new r("esse",-1,1),new r("isse",-1,1),new r("aste",-1,1),new r("este",-1,1),new r("iste",-1,1),new r("ei",-1,1),new r("arei",16,1),new r("erei",16,1),new r("irei",16,1),new r("am",-1,1),new r("iam",20,1),new r("ariam",21,1),new r("eriam",21,1),new r("iriam",21,1),new r("aram",20,1),new r("eram",20,1),new r("iram",20,1),new r("avam",20,1),new r("em",-1,1),new r("arem",29,1),new r("erem",29,1),new r("irem",29,1),new r("assem",29,1),new r("essem",29,1),new r("issem",29,1),new r("ado",-1,1),new r("ido",-1,1),new r("ando",-1,1),new r("endo",-1,1),new r("indo",-1,1),new r("ara~o",-1,1),new r("era~o",-1,1),new r("ira~o",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("ir",-1,1),new r("as",-1,1),new r("adas",47,1),new r("idas",47,1),new r("ias",47,1),new r("arias",50,1),new r("erias",50,1),new r("irias",50,1),new r("aras",47,1),new r("eras",47,1),new r("iras",47,1),new r("avas",47,1),new r("es",-1,1),new r("ardes",58,1),new r("erdes",58,1),new r("irdes",58,1),new r("ares",58,1),new r("eres",58,1),new r("ires",58,1),new r("asses",58,1),new r("esses",58,1),new r("isses",58,1),new r("astes",58,1),new r("estes",58,1),new r("istes",58,1),new r("is",-1,1),new r("ais",71,1),new r("eis",71,1),new r("areis",73,1),new r("ereis",73,1),new r("ireis",73,1),new r("áreis",73,1),new r("éreis",73,1),new r("íreis",73,1),new r("ásseis",73,1),new r("ésseis",73,1),new r("ísseis",73,1),new r("áveis",73,1),new r("íeis",73,1),new r("aríeis",84,1),new r("eríeis",84,1),new r("iríeis",84,1),new r("ados",-1,1),new r("idos",-1,1),new r("amos",-1,1),new r("áramos",90,1),new r("éramos",90,1),new r("íramos",90,1),new r("ávamos",90,1),new r("íamos",90,1),new r("aríamos",95,1),new r("eríamos",95,1),new r("iríamos",95,1),new r("emos",-1,1),new r("aremos",99,1),new r("eremos",99,1),new r("iremos",99,1),new r("ássemos",99,1),new r("êssemos",99,1),new r("íssemos",99,1),new r("imos",-1,1),new r("armos",-1,1),new r("ermos",-1,1),new r("irmos",-1,1),new r("ámos",-1,1),new 
r("arás",-1,1),new r("erás",-1,1),new r("irás",-1,1),new r("eu",-1,1),new r("iu",-1,1),new r("ou",-1,1),new r("ará",-1,1),new r("erá",-1,1),new r("irá",-1,1)],W=[new r("a",-1,1),new r("i",-1,1),new r("o",-1,1),new r("os",-1,1),new r("á",-1,1),new r("í",-1,1),new r("ó",-1,1)],L=[new r("e",-1,1),new r("ç",-1,2),new r("é",-1,1),new r("ê",-1,1)],y=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,3,19,12,2],z=new s;this.setCurrent=function(e){z.setCurrent(e)},this.getCurrent=function(){return z.getCurrent()},this.stem=function(){var r=z.cursor;return e(),z.cursor=r,a(),z.limit_backward=r,z.cursor=z.limit,_(),z.cursor=z.limit,p(),z.cursor=z.limit_backward,u(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.pt.stemmer,"stemmer-pt"),e.pt.stopWordFilter=e.generateStopWordFilter("a ao aos aquela aquelas aquele aqueles aquilo as até com como da das de dela delas dele deles depois do dos e ela elas ele eles em entre era eram essa essas esse esses esta estamos estas estava estavam este esteja estejam estejamos estes esteve estive estivemos estiver estivera estiveram estiverem estivermos estivesse estivessem estivéramos estivéssemos estou está estávamos estão eu foi fomos for fora foram forem formos fosse fossem fui fôramos fôssemos haja hajam hajamos havemos hei houve houvemos houver houvera houveram houverei houverem houveremos houveria houveriam houvermos houverá houverão houveríamos houvesse houvessem houvéramos houvéssemos há hão isso isto já lhe lhes mais mas me mesmo meu meus minha minhas muito na nas nem no nos nossa nossas nosso nossos num numa não nós o os ou para pela pelas pelo pelos por qual quando que quem se seja sejam sejamos sem serei seremos seria seriam será serão seríamos seu seus somos sou sua suas são só também te tem temos tenha tenham tenhamos tenho terei teremos teria teriam terá terão teríamos teu teus teve tinha tinham tive tivemos tiver tivera tiveram tiverem tivermos tivesse tivessem tivéramos tivéssemos tu tua tuas tém tínhamos um uma você vocês vos à às éramos".split(" ")),e.Pipeline.registerFunction(e.pt.stopWordFilter,"stopWordFilter-pt")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.ro.min.js b/assets/javascripts/lunr/min/lunr.ro.min.js new file mode 100644 index 000000000..727714018 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.ro.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Romanian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. 
Please include / require Lunr stemmer support before this script.");e.ro=function(){this.pipeline.reset(),this.pipeline.add(e.ro.trimmer,e.ro.stopWordFilter,e.ro.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ro.stemmer))},e.ro.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.ro.trimmer=e.trimmerSupport.generateTrimmer(e.ro.wordCharacters),e.Pipeline.registerFunction(e.ro.trimmer,"trimmer-ro"),e.ro.stemmer=function(){var i=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,n=new function(){function e(e,i){L.eq_s(1,e)&&(L.ket=L.cursor,L.in_grouping(W,97,259)&&L.slice_from(i))}function n(){for(var i,r;;){if(i=L.cursor,L.in_grouping(W,97,259)&&(r=L.cursor,L.bra=r,e("u","U"),L.cursor=r,e("i","I")),L.cursor=i,L.cursor>=L.limit)break;L.cursor++}}function t(){if(L.out_grouping(W,97,259)){for(;!L.in_grouping(W,97,259);){if(L.cursor>=L.limit)return!0;L.cursor++}return!1}return!0}function a(){if(L.in_grouping(W,97,259))for(;!L.out_grouping(W,97,259);){if(L.cursor>=L.limit)return!0;L.cursor++}return!1}function o(){var e,i,r=L.cursor;if(L.in_grouping(W,97,259)){if(e=L.cursor,!t())return void(h=L.cursor);if(L.cursor=e,!a())return void(h=L.cursor)}L.cursor=r,L.out_grouping(W,97,259)&&(i=L.cursor,t()&&(L.cursor=i,L.in_grouping(W,97,259)&&L.cursor=L.limit)return!1;L.cursor++}for(;!L.out_grouping(W,97,259);){if(L.cursor>=L.limit)return!1;L.cursor++}return!0}function c(){var e=L.cursor;h=L.limit,k=h,g=h,o(),L.cursor=e,u()&&(k=L.cursor,u()&&(g=L.cursor))}function s(){for(var e;;){if(L.bra=L.cursor,e=L.find_among(z,3))switch(L.ket=L.cursor,e){case 1:L.slice_from("i");continue;case 2:L.slice_from("u");continue;case 3:if(L.cursor>=L.limit)break;L.cursor++;continue}break}}function w(){return h<=L.cursor}function m(){return k<=L.cursor}function l(){return g<=L.cursor}function f(){var e,i;if(L.ket=L.cursor,(e=L.find_among_b(C,16))&&(L.bra=L.cursor,m()))switch(e){case 1:L.slice_del();break;case 2:L.slice_from("a");break;case 3:L.slice_from("e");break;case 4:L.slice_from("i");break;case 5:i=L.limit-L.cursor,L.eq_s_b(2,"ab")||(L.cursor=L.limit-i,L.slice_from("i"));break;case 6:L.slice_from("at");break;case 7:L.slice_from("aţi")}}function p(){var e,i=L.limit-L.cursor;if(L.ket=L.cursor,(e=L.find_among_b(P,46))&&(L.bra=L.cursor,m())){switch(e){case 1:L.slice_from("abil");break;case 2:L.slice_from("ibil");break;case 3:L.slice_from("iv");break;case 4:L.slice_from("ic");break;case 5:L.slice_from("at");break;case 6:L.slice_from("it")}return _=!0,L.cursor=L.limit-i,!0}return!1}function d(){var e,i;for(_=!1;;)if(i=L.limit-L.cursor,!p()){L.cursor=L.limit-i;break}if(L.ket=L.cursor,(e=L.find_among_b(F,62))&&(L.bra=L.cursor,l())){switch(e){case 1:L.slice_del();break;case 2:L.eq_s_b(1,"ţ")&&(L.bra=L.cursor,L.slice_from("t"));break;case 3:L.slice_from("ist")}_=!0}}function b(){var e,i,r;if(L.cursor>=h){if(i=L.limit_backward,L.limit_backward=h,L.ket=L.cursor,e=L.find_among_b(q,94))switch(L.bra=L.cursor,e){case 1:if(r=L.limit-L.cursor,!L.out_grouping_b(W,97,259)&&(L.cursor=L.limit-r,!L.eq_s_b(1,"u")))break;case 2:L.slice_del()}L.limit_backward=i}}function v(){var e;L.ket=L.cursor,(e=L.find_among_b(S,5))&&(L.bra=L.cursor,w()&&1==e&&L.slice_del())}var _,g,k,h,z=[new i("",-1,3),new i("I",0,1),new i("U",0,2)],C=[new i("ea",-1,3),new i("aţia",-1,7),new i("aua",-1,2),new i("iua",-1,4),new i("aţie",-1,7),new i("ele",-1,3),new i("ile",-1,5),new i("iile",6,4),new i("iei",-1,4),new i("atei",-1,6),new i("ii",-1,4),new i("ului",-1,1),new 
i("ul",-1,1),new i("elor",-1,3),new i("ilor",-1,4),new i("iilor",14,4)],P=[new i("icala",-1,4),new i("iciva",-1,4),new i("ativa",-1,5),new i("itiva",-1,6),new i("icale",-1,4),new i("aţiune",-1,5),new i("iţiune",-1,6),new i("atoare",-1,5),new i("itoare",-1,6),new i("ătoare",-1,5),new i("icitate",-1,4),new i("abilitate",-1,1),new i("ibilitate",-1,2),new i("ivitate",-1,3),new i("icive",-1,4),new i("ative",-1,5),new i("itive",-1,6),new i("icali",-1,4),new i("atori",-1,5),new i("icatori",18,4),new i("itori",-1,6),new i("ători",-1,5),new i("icitati",-1,4),new i("abilitati",-1,1),new i("ivitati",-1,3),new i("icivi",-1,4),new i("ativi",-1,5),new i("itivi",-1,6),new i("icităi",-1,4),new i("abilităi",-1,1),new i("ivităi",-1,3),new i("icităţi",-1,4),new i("abilităţi",-1,1),new i("ivităţi",-1,3),new i("ical",-1,4),new i("ator",-1,5),new i("icator",35,4),new i("itor",-1,6),new i("ător",-1,5),new i("iciv",-1,4),new i("ativ",-1,5),new i("itiv",-1,6),new i("icală",-1,4),new i("icivă",-1,4),new i("ativă",-1,5),new i("itivă",-1,6)],F=[new i("ica",-1,1),new i("abila",-1,1),new i("ibila",-1,1),new i("oasa",-1,1),new i("ata",-1,1),new i("ita",-1,1),new i("anta",-1,1),new i("ista",-1,3),new i("uta",-1,1),new i("iva",-1,1),new i("ic",-1,1),new i("ice",-1,1),new i("abile",-1,1),new i("ibile",-1,1),new i("isme",-1,3),new i("iune",-1,2),new i("oase",-1,1),new i("ate",-1,1),new i("itate",17,1),new i("ite",-1,1),new i("ante",-1,1),new i("iste",-1,3),new i("ute",-1,1),new i("ive",-1,1),new i("ici",-1,1),new i("abili",-1,1),new i("ibili",-1,1),new i("iuni",-1,2),new i("atori",-1,1),new i("osi",-1,1),new i("ati",-1,1),new i("itati",30,1),new i("iti",-1,1),new i("anti",-1,1),new i("isti",-1,3),new i("uti",-1,1),new i("işti",-1,3),new i("ivi",-1,1),new i("ităi",-1,1),new i("oşi",-1,1),new i("ităţi",-1,1),new i("abil",-1,1),new i("ibil",-1,1),new i("ism",-1,3),new i("ator",-1,1),new i("os",-1,1),new i("at",-1,1),new i("it",-1,1),new i("ant",-1,1),new i("ist",-1,3),new i("ut",-1,1),new i("iv",-1,1),new i("ică",-1,1),new i("abilă",-1,1),new i("ibilă",-1,1),new i("oasă",-1,1),new i("ată",-1,1),new i("ită",-1,1),new i("antă",-1,1),new i("istă",-1,3),new i("ută",-1,1),new i("ivă",-1,1)],q=[new i("ea",-1,1),new i("ia",-1,1),new i("esc",-1,1),new i("ăsc",-1,1),new i("ind",-1,1),new i("ând",-1,1),new i("are",-1,1),new i("ere",-1,1),new i("ire",-1,1),new i("âre",-1,1),new i("se",-1,2),new i("ase",10,1),new i("sese",10,2),new i("ise",10,1),new i("use",10,1),new i("âse",10,1),new i("eşte",-1,1),new i("ăşte",-1,1),new i("eze",-1,1),new i("ai",-1,1),new i("eai",19,1),new i("iai",19,1),new i("sei",-1,2),new i("eşti",-1,1),new i("ăşti",-1,1),new i("ui",-1,1),new i("ezi",-1,1),new i("âi",-1,1),new i("aşi",-1,1),new i("seşi",-1,2),new i("aseşi",29,1),new i("seseşi",29,2),new i("iseşi",29,1),new i("useşi",29,1),new i("âseşi",29,1),new i("işi",-1,1),new i("uşi",-1,1),new i("âşi",-1,1),new i("aţi",-1,2),new i("eaţi",38,1),new i("iaţi",38,1),new i("eţi",-1,2),new i("iţi",-1,2),new i("âţi",-1,2),new i("arăţi",-1,1),new i("serăţi",-1,2),new i("aserăţi",45,1),new i("seserăţi",45,2),new i("iserăţi",45,1),new i("userăţi",45,1),new i("âserăţi",45,1),new i("irăţi",-1,1),new i("urăţi",-1,1),new i("ârăţi",-1,1),new i("am",-1,1),new i("eam",54,1),new i("iam",54,1),new i("em",-1,2),new i("asem",57,1),new i("sesem",57,2),new i("isem",57,1),new i("usem",57,1),new i("âsem",57,1),new i("im",-1,2),new i("âm",-1,2),new i("ăm",-1,2),new i("arăm",65,1),new i("serăm",65,2),new i("aserăm",67,1),new i("seserăm",67,2),new i("iserăm",67,1),new i("userăm",67,1),new 
i("âserăm",67,1),new i("irăm",65,1),new i("urăm",65,1),new i("ârăm",65,1),new i("au",-1,1),new i("eau",76,1),new i("iau",76,1),new i("indu",-1,1),new i("ându",-1,1),new i("ez",-1,1),new i("ească",-1,1),new i("ară",-1,1),new i("seră",-1,2),new i("aseră",84,1),new i("seseră",84,2),new i("iseră",84,1),new i("useră",84,1),new i("âseră",84,1),new i("iră",-1,1),new i("ură",-1,1),new i("âră",-1,1),new i("ează",-1,1)],S=[new i("a",-1,1),new i("e",-1,1),new i("ie",1,1),new i("i",-1,1),new i("ă",-1,1)],W=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,2,32,0,0,4],L=new r;this.setCurrent=function(e){L.setCurrent(e)},this.getCurrent=function(){return L.getCurrent()},this.stem=function(){var e=L.cursor;return n(),L.cursor=e,c(),L.limit_backward=e,L.cursor=L.limit,f(),L.cursor=L.limit,d(),L.cursor=L.limit,_||(L.cursor=L.limit,b(),L.cursor=L.limit),v(),L.cursor=L.limit_backward,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return n.setCurrent(e),n.stem(),n.getCurrent()}):(n.setCurrent(e),n.stem(),n.getCurrent())}}(),e.Pipeline.registerFunction(e.ro.stemmer,"stemmer-ro"),e.ro.stopWordFilter=e.generateStopWordFilter("acea aceasta această aceea acei aceia acel acela acele acelea acest acesta aceste acestea aceşti aceştia acolo acord acum ai aia aibă aici al ale alea altceva altcineva am ar are asemenea asta astea astăzi asupra au avea avem aveţi azi aş aşadar aţi bine bucur bună ca care caut ce cel ceva chiar cinci cine cineva contra cu cum cumva curând curînd când cât câte câtva câţi cînd cît cîte cîtva cîţi că căci cărei căror cărui către da dacă dar datorită dată dau de deci deja deoarece departe deşi din dinaintea dintr- dintre doi doilea două drept după dă ea ei el ele eram este eu eşti face fata fi fie fiecare fii fim fiu fiţi frumos fără graţie halbă iar ieri la le li lor lui lângă lîngă mai mea mei mele mereu meu mi mie mine mult multă mulţi mulţumesc mâine mîine mă ne nevoie nici nicăieri nimeni nimeri nimic nişte noastre noastră noi noroc nostru nouă noştri nu opt ori oricare orice oricine oricum oricând oricât oricînd oricît oriunde patra patru patrulea pe pentru peste pic poate pot prea prima primul prin puţin puţina puţină până pînă rog sa sale sau se spate spre sub sunt suntem sunteţi sută sînt sîntem sînteţi să săi său ta tale te timp tine toate toată tot totuşi toţi trei treia treilea tu tăi tău un una unde undeva unei uneia unele uneori unii unor unora unu unui unuia unul vi voastre voastră voi vostru vouă voştri vreme vreo vreun vă zece zero zi zice îi îl îmi împotriva în înainte înaintea încotro încât încît între întrucât întrucît îţi ăla ălea ăsta ăstea ăştia şapte şase şi ştiu ţi ţie".split(" ")),e.Pipeline.registerFunction(e.ro.stopWordFilter,"stopWordFilter-ro")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.ru.min.js b/assets/javascripts/lunr/min/lunr.ru.min.js new file mode 100644 index 000000000..186cc485c --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.ru.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Russian` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! 
+ * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,n){"function"==typeof define&&define.amd?define(n):"object"==typeof exports?module.exports=n():n()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.ru=function(){this.pipeline.reset(),this.pipeline.add(e.ru.trimmer,e.ru.stopWordFilter,e.ru.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ru.stemmer))},e.ru.wordCharacters="Ѐ-҄҇-ԯᴫᵸⷠ-ⷿꙀ-ꚟ︮︯",e.ru.trimmer=e.trimmerSupport.generateTrimmer(e.ru.wordCharacters),e.Pipeline.registerFunction(e.ru.trimmer,"trimmer-ru"),e.ru.stemmer=function(){var n=e.stemmerSupport.Among,r=e.stemmerSupport.SnowballProgram,t=new function(){function e(){for(;!W.in_grouping(S,1072,1103);){if(W.cursor>=W.limit)return!1;W.cursor++}return!0}function t(){for(;!W.out_grouping(S,1072,1103);){if(W.cursor>=W.limit)return!1;W.cursor++}return!0}function w(){b=W.limit,_=b,e()&&(b=W.cursor,t()&&e()&&t()&&(_=W.cursor))}function i(){return _<=W.cursor}function u(e,n){var r,t;if(W.ket=W.cursor,r=W.find_among_b(e,n)){switch(W.bra=W.cursor,r){case 1:if(t=W.limit-W.cursor,!W.eq_s_b(1,"а")&&(W.cursor=W.limit-t,!W.eq_s_b(1,"я")))return!1;case 2:W.slice_del()}return!0}return!1}function o(){return u(h,9)}function s(e,n){var r;return W.ket=W.cursor,!!(r=W.find_among_b(e,n))&&(W.bra=W.cursor,1==r&&W.slice_del(),!0)}function c(){return s(g,26)}function m(){return!!c()&&(u(C,8),!0)}function f(){return s(k,2)}function l(){return u(P,46)}function a(){s(v,36)}function p(){var e;W.ket=W.cursor,(e=W.find_among_b(F,2))&&(W.bra=W.cursor,i()&&1==e&&W.slice_del())}function d(){var e;if(W.ket=W.cursor,e=W.find_among_b(q,4))switch(W.bra=W.cursor,e){case 1:if(W.slice_del(),W.ket=W.cursor,!W.eq_s_b(1,"н"))break;W.bra=W.cursor;case 2:if(!W.eq_s_b(1,"н"))break;case 3:W.slice_del()}}var _,b,h=[new n("в",-1,1),new n("ив",0,2),new n("ыв",0,2),new n("вши",-1,1),new n("ивши",3,2),new n("ывши",3,2),new n("вшись",-1,1),new n("ившись",6,2),new n("ывшись",6,2)],g=[new n("ее",-1,1),new n("ие",-1,1),new n("ое",-1,1),new n("ые",-1,1),new n("ими",-1,1),new n("ыми",-1,1),new n("ей",-1,1),new n("ий",-1,1),new n("ой",-1,1),new n("ый",-1,1),new n("ем",-1,1),new n("им",-1,1),new n("ом",-1,1),new n("ым",-1,1),new n("его",-1,1),new n("ого",-1,1),new n("ему",-1,1),new n("ому",-1,1),new n("их",-1,1),new n("ых",-1,1),new n("ею",-1,1),new n("ою",-1,1),new n("ую",-1,1),new n("юю",-1,1),new n("ая",-1,1),new n("яя",-1,1)],C=[new n("ем",-1,1),new n("нн",-1,1),new n("вш",-1,1),new n("ивш",2,2),new n("ывш",2,2),new n("щ",-1,1),new n("ющ",5,1),new n("ующ",6,2)],k=[new n("сь",-1,1),new n("ся",-1,1)],P=[new n("ла",-1,1),new n("ила",0,2),new n("ыла",0,2),new n("на",-1,1),new n("ена",3,2),new n("ете",-1,1),new n("ите",-1,2),new n("йте",-1,1),new n("ейте",7,2),new n("уйте",7,2),new n("ли",-1,1),new n("или",10,2),new n("ыли",10,2),new n("й",-1,1),new n("ей",13,2),new n("уй",13,2),new n("л",-1,1),new n("ил",16,2),new n("ыл",16,2),new n("ем",-1,1),new n("им",-1,2),new n("ым",-1,2),new n("н",-1,1),new n("ен",22,2),new n("ло",-1,1),new n("ило",24,2),new n("ыло",24,2),new n("но",-1,1),new n("ено",27,2),new n("нно",27,1),new n("ет",-1,1),new n("ует",30,2),new 
n("ит",-1,2),new n("ыт",-1,2),new n("ют",-1,1),new n("уют",34,2),new n("ят",-1,2),new n("ны",-1,1),new n("ены",37,2),new n("ть",-1,1),new n("ить",39,2),new n("ыть",39,2),new n("ешь",-1,1),new n("ишь",-1,2),new n("ю",-1,2),new n("ую",44,2)],v=[new n("а",-1,1),new n("ев",-1,1),new n("ов",-1,1),new n("е",-1,1),new n("ие",3,1),new n("ье",3,1),new n("и",-1,1),new n("еи",6,1),new n("ии",6,1),new n("ами",6,1),new n("ями",6,1),new n("иями",10,1),new n("й",-1,1),new n("ей",12,1),new n("ией",13,1),new n("ий",12,1),new n("ой",12,1),new n("ам",-1,1),new n("ем",-1,1),new n("ием",18,1),new n("ом",-1,1),new n("ям",-1,1),new n("иям",21,1),new n("о",-1,1),new n("у",-1,1),new n("ах",-1,1),new n("ях",-1,1),new n("иях",26,1),new n("ы",-1,1),new n("ь",-1,1),new n("ю",-1,1),new n("ию",30,1),new n("ью",30,1),new n("я",-1,1),new n("ия",33,1),new n("ья",33,1)],F=[new n("ост",-1,1),new n("ость",-1,1)],q=[new n("ейше",-1,1),new n("н",-1,2),new n("ейш",-1,1),new n("ь",-1,3)],S=[33,65,8,232],W=new r;this.setCurrent=function(e){W.setCurrent(e)},this.getCurrent=function(){return W.getCurrent()},this.stem=function(){return w(),W.cursor=W.limit,!(W.cursor=i&&(e-=i,t[e>>3]&1<<(7&e)))return this.cursor++,!0}return!1},in_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e<=s&&e>=i&&(e-=i,t[e>>3]&1<<(7&e)))return this.cursor--,!0}return!1},out_grouping:function(t,i,s){if(this.cursors||e>3]&1<<(7&e)))return this.cursor++,!0}return!1},out_grouping_b:function(t,i,s){if(this.cursor>this.limit_backward){var e=r.charCodeAt(this.cursor-1);if(e>s||e>3]&1<<(7&e)))return this.cursor--,!0}return!1},eq_s:function(t,i){if(this.limit-this.cursor>1),f=0,l=o0||e==s||c)break;c=!0}}for(;;){var _=t[s];if(o>=_.s_size){if(this.cursor=n+_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n+_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},find_among_b:function(t,i){for(var s=0,e=i,n=this.cursor,u=this.limit_backward,o=0,h=0,c=!1;;){for(var a=s+(e-s>>1),f=0,l=o=0;m--){if(n-l==u){f=-1;break}if(f=r.charCodeAt(n-1-l)-_.s[m])break;l++}if(f<0?(e=a,h=l):(s=a,o=l),e-s<=1){if(s>0||e==s||c)break;c=!0}}for(;;){var _=t[s];if(o>=_.s_size){if(this.cursor=n-_.s_size,!_.method)return _.result;var b=_.method();if(this.cursor=n-_.s_size,b)return _.result}if((s=_.substring_i)<0)return 0}},replace_s:function(t,i,s){var e=s.length-(i-t),n=r.substring(0,t),u=r.substring(i);return r=n+s+u,this.limit+=e,this.cursor>=i?this.cursor+=e:this.cursor>t&&(this.cursor=t),e},slice_check:function(){if(this.bra<0||this.bra>this.ket||this.ket>this.limit||this.limit>r.length)throw"faulty slice operation"},slice_from:function(r){this.slice_check(),this.replace_s(this.bra,this.ket,r)},slice_del:function(){this.slice_from("")},insert:function(r,t,i){var s=this.replace_s(r,t,i);r<=this.bra&&(this.bra+=s),r<=this.ket&&(this.ket+=s)},slice_to:function(){return this.slice_check(),r.substring(this.bra,this.ket)},eq_v_b:function(r){return this.eq_s_b(r.length,r)}}}},r.trimmerSupport={generateTrimmer:function(r){var t=new RegExp("^[^"+r+"]+"),i=new RegExp("[^"+r+"]+$");return function(r){return"function"==typeof r.update?r.update(function(r){return r.replace(t,"").replace(i,"")}):r.replace(t,"").replace(i,"")}}}}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.sv.min.js b/assets/javascripts/lunr/min/lunr.sv.min.js new file mode 100644 index 000000000..3e5eb6400 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.sv.min.js @@ -0,0 +1,18 @@ +/*! 
+ * Lunr languages, `Swedish` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.sv=function(){this.pipeline.reset(),this.pipeline.add(e.sv.trimmer,e.sv.stopWordFilter,e.sv.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.sv.stemmer))},e.sv.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",e.sv.trimmer=e.trimmerSupport.generateTrimmer(e.sv.wordCharacters),e.Pipeline.registerFunction(e.sv.trimmer,"trimmer-sv"),e.sv.stemmer=function(){var r=e.stemmerSupport.Among,n=e.stemmerSupport.SnowballProgram,t=new function(){function e(){var e,r=w.cursor+3;if(o=w.limit,0<=r||r<=w.limit){for(a=r;;){if(e=w.cursor,w.in_grouping(l,97,246)){w.cursor=e;break}if(w.cursor=e,w.cursor>=w.limit)return;w.cursor++}for(;!w.out_grouping(l,97,246);){if(w.cursor>=w.limit)return;w.cursor++}o=w.cursor,o=o&&(w.limit_backward=o,w.cursor=w.limit,w.ket=w.cursor,e=w.find_among_b(u,37),w.limit_backward=r,e))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:w.in_grouping_b(d,98,121)&&w.slice_del()}}function i(){var e=w.limit_backward;w.cursor>=o&&(w.limit_backward=o,w.cursor=w.limit,w.find_among_b(c,7)&&(w.cursor=w.limit,w.ket=w.cursor,w.cursor>w.limit_backward&&(w.bra=--w.cursor,w.slice_del())),w.limit_backward=e)}function s(){var e,r;if(w.cursor>=o){if(r=w.limit_backward,w.limit_backward=o,w.cursor=w.limit,w.ket=w.cursor,e=w.find_among_b(m,5))switch(w.bra=w.cursor,e){case 1:w.slice_del();break;case 2:w.slice_from("lös");break;case 3:w.slice_from("full")}w.limit_backward=r}}var a,o,u=[new r("a",-1,1),new r("arna",0,1),new r("erna",0,1),new r("heterna",2,1),new r("orna",0,1),new r("ad",-1,1),new r("e",-1,1),new r("ade",6,1),new r("ande",6,1),new r("arne",6,1),new r("are",6,1),new r("aste",6,1),new r("en",-1,1),new r("anden",12,1),new r("aren",12,1),new r("heten",12,1),new r("ern",-1,1),new r("ar",-1,1),new r("er",-1,1),new r("heter",18,1),new r("or",-1,1),new r("s",-1,2),new r("as",21,1),new r("arnas",22,1),new r("ernas",22,1),new r("ornas",22,1),new r("es",21,1),new r("ades",26,1),new r("andes",26,1),new r("ens",21,1),new r("arens",29,1),new r("hetens",29,1),new r("erns",21,1),new r("at",-1,1),new r("andet",-1,1),new r("het",-1,1),new r("ast",-1,1)],c=[new r("dd",-1,-1),new r("gd",-1,-1),new r("nn",-1,-1),new r("dt",-1,-1),new r("gt",-1,-1),new r("kt",-1,-1),new r("tt",-1,-1)],m=[new r("ig",-1,1),new r("lig",0,1),new r("els",-1,1),new r("fullt",-1,3),new r("löst",-1,2)],l=[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,24,0,32],d=[119,127,149],w=new n;this.setCurrent=function(e){w.setCurrent(e)},this.getCurrent=function(){return w.getCurrent()},this.stem=function(){var r=w.cursor;return e(),w.limit_backward=r,w.cursor=w.limit,t(),w.cursor=w.limit,i(),w.cursor=w.limit,s(),!0}};return function(e){return"function"==typeof e.update?e.update(function(e){return 
t.setCurrent(e),t.stem(),t.getCurrent()}):(t.setCurrent(e),t.stem(),t.getCurrent())}}(),e.Pipeline.registerFunction(e.sv.stemmer,"stemmer-sv"),e.sv.stopWordFilter=e.generateStopWordFilter("alla allt att av blev bli blir blivit de dem den denna deras dess dessa det detta dig din dina ditt du där då efter ej eller en er era ert ett från för ha hade han hans har henne hennes hon honom hur här i icke ingen inom inte jag ju kan kunde man med mellan men mig min mina mitt mot mycket ni nu när någon något några och om oss på samma sedan sig sin sina sitta själv skulle som så sådan sådana sådant till under upp ut utan vad var vara varför varit varje vars vart vem vi vid vilka vilkas vilken vilket vår våra vårt än är åt över".split(" ")),e.Pipeline.registerFunction(e.sv.stopWordFilter,"stopWordFilter-sv")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.ta.min.js b/assets/javascripts/lunr/min/lunr.ta.min.js new file mode 100644 index 000000000..a644bed22 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.ta.min.js @@ -0,0 +1 @@ +!function(e,t){"function"==typeof define&&define.amd?define(t):"object"==typeof exports?module.exports=t():t()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.ta=function(){this.pipeline.reset(),this.pipeline.add(e.ta.trimmer,e.ta.stopWordFilter,e.ta.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.ta.stemmer))},e.ta.wordCharacters="஀-உஊ-ஏஐ-ஙச-ட஠-னப-யர-ஹ஺-ிீ-௉ொ-௏ௐ-௙௚-௟௠-௩௪-௯௰-௹௺-௿a-zA-Za-zA-Z0-90-9",e.ta.trimmer=e.trimmerSupport.generateTrimmer(e.ta.wordCharacters),e.Pipeline.registerFunction(e.ta.trimmer,"trimmer-ta"),e.ta.stopWordFilter=e.generateStopWordFilter("அங்கு அங்கே அது அதை அந்த அவர் அவர்கள் அவள் அவன் அவை ஆக ஆகவே ஆகையால் ஆதலால் ஆதலினால் ஆனாலும் ஆனால் இங்கு இங்கே இது இதை இந்த இப்படி இவர் இவர்கள் இவள் இவன் இவை இவ்வளவு உனக்கு உனது உன் உன்னால் எங்கு எங்கே எது எதை எந்த எப்படி எவர் எவர்கள் எவள் எவன் எவை எவ்வளவு எனக்கு எனது எனவே என் என்ன என்னால் ஏது ஏன் தனது தன்னால் தானே தான் நாங்கள் நாம் நான் நீ நீங்கள்".split(" ")),e.ta.stemmer=function(){return function(e){return"function"==typeof e.update?e.update(function(e){return e}):e}}();var t=e.wordcut;t.init(),e.ta.tokenizer=function(r){if(!arguments.length||null==r||void 0==r)return[];if(Array.isArray(r))return r.map(function(t){return "2"==e.version[0]?new e.Token(t.toLowerCase()):t.toLowerCase()});var i=r.toString().toLowerCase().replace(/^\s+/,"");return t.cut(i).split("|")},e.Pipeline.registerFunction(e.ta.stemmer,"stemmer-ta"),e.Pipeline.registerFunction(e.ta.stopWordFilter,"stopWordFilter-ta")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.te.min.js b/assets/javascripts/lunr/min/lunr.te.min.js new file mode 100644 index 000000000..9fa7a93b9 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.te.min.js @@ -0,0 +1 @@ +!function(e,t){"function"==typeof define&&define.amd?define(t):"object"==typeof exports?module.exports=t():t()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present.
Please include / require Lunr stemmer support before this script.");e.te=function(){this.pipeline.reset(),this.pipeline.add(e.te.trimmer,e.te.stopWordFilter,e.te.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(e.te.stemmer))},e.te.wordCharacters="ఀ-ఄఅ-ఔక-హా-ౌౕ-ౖౘ-ౚౠ-ౡౢ-ౣ౦-౯౸-౿఼ఽ్ౝ౷౤౥",e.te.trimmer=e.trimmerSupport.generateTrimmer(e.te.wordCharacters),e.Pipeline.registerFunction(e.te.trimmer,"trimmer-te"),e.te.stopWordFilter=e.generateStopWordFilter("అందరూ అందుబాటులో అడగండి అడగడం అడ్డంగా అనుగుణంగా అనుమతించు అనుమతిస్తుంది అయితే ఇప్పటికే ఉన్నారు ఎక్కడైనా ఎప్పుడు ఎవరైనా ఎవరో ఏ ఏదైనా ఏమైనప్పటికి ఒక ఒకరు కనిపిస్తాయి కాదు కూడా గా గురించి చుట్టూ చేయగలిగింది తగిన తర్వాత దాదాపు దూరంగా నిజంగా పై ప్రకారం ప్రక్కన మధ్య మరియు మరొక మళ్ళీ మాత్రమే మెచ్చుకో వద్ద వెంట వేరుగా వ్యతిరేకంగా సంబంధం".split(" ")),e.te.stemmer=function(){return function(e){return"function"==typeof e.update?e.update(function(e){return e}):e}}();var t=e.wordcut;t.init(),e.te.tokenizer=function(r){if(!arguments.length||null==r||void 0==r)return[];if(Array.isArray(r))return r.map(function(t){return "2"==e.version[0]?new e.Token(t.toLowerCase()):t.toLowerCase()});var i=r.toString().toLowerCase().replace(/^\s+/,"");return t.cut(i).split("|")},e.Pipeline.registerFunction(e.te.stemmer,"stemmer-te"),e.Pipeline.registerFunction(e.te.stopWordFilter,"stopWordFilter-te")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.th.min.js b/assets/javascripts/lunr/min/lunr.th.min.js new file mode 100644 index 000000000..dee3aac6e --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.th.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var r="2"==e.version[0];e.th=function(){this.pipeline.reset(),this.pipeline.add(e.th.trimmer),r?this.tokenizer=e.th.tokenizer:(e.tokenizer&&(e.tokenizer=e.th.tokenizer),this.tokenizerFn&&(this.tokenizerFn=e.th.tokenizer))},e.th.wordCharacters="[฀-๿]",e.th.trimmer=e.trimmerSupport.generateTrimmer(e.th.wordCharacters),e.Pipeline.registerFunction(e.th.trimmer,"trimmer-th");var t=e.wordcut;t.init(),e.th.tokenizer=function(i){if(!arguments.length||null==i||void 0==i)return[];if(Array.isArray(i))return i.map(function(t){return r?new e.Token(t):t});var n=i.toString().replace(/^\s+/,"");return t.cut(n).split("|")}}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.tr.min.js b/assets/javascripts/lunr/min/lunr.tr.min.js new file mode 100644 index 000000000..563f6ec1f --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.tr.min.js @@ -0,0 +1,18 @@ +/*! + * Lunr languages, `Turkish` language + * https://github.com/MihaiValentin/lunr-languages + * + * Copyright 2014, Mihai Valentin + * http://www.mozilla.org/MPL/ + */ +/*! + * based on + * Snowball JavaScript Library v0.3 + * http://code.google.com/p/urim/ + * http://snowball.tartarus.org/ + * + * Copyright 2010, Oleg Mazko + * http://www.mozilla.org/MPL/ + */ + +!function(r,i){"function"==typeof define&&define.amd?define(i):"object"==typeof exports?module.exports=i():i()(r.lunr)}(this,function(){return function(r){if(void 0===r)throw new Error("Lunr is not present.
Please include / require Lunr before this script.");if(void 0===r.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");r.tr=function(){this.pipeline.reset(),this.pipeline.add(r.tr.trimmer,r.tr.stopWordFilter,r.tr.stemmer),this.searchPipeline&&(this.searchPipeline.reset(),this.searchPipeline.add(r.tr.stemmer))},r.tr.wordCharacters="A-Za-zªºÀ-ÖØ-öø-ʸˠ-ˤᴀ-ᴥᴬ-ᵜᵢ-ᵥᵫ-ᵷᵹ-ᶾḀ-ỿⁱⁿₐ-ₜKÅℲⅎⅠ-ↈⱠ-ⱿꜢ-ꞇꞋ-ꞭꞰ-ꞷꟷ-ꟿꬰ-ꭚꭜ-ꭤff-stA-Za-z",r.tr.trimmer=r.trimmerSupport.generateTrimmer(r.tr.wordCharacters),r.Pipeline.registerFunction(r.tr.trimmer,"trimmer-tr"),r.tr.stemmer=function(){var i=r.stemmerSupport.Among,e=r.stemmerSupport.SnowballProgram,n=new function(){function r(r,i,e){for(;;){var n=Dr.limit-Dr.cursor;if(Dr.in_grouping_b(r,i,e)){Dr.cursor=Dr.limit-n;break}if(Dr.cursor=Dr.limit-n,Dr.cursor<=Dr.limit_backward)return!1;Dr.cursor--}return!0}function n(){var i,e;i=Dr.limit-Dr.cursor,r(Wr,97,305);for(var n=0;nDr.limit_backward&&(Dr.cursor--,e=Dr.limit-Dr.cursor,i()))?(Dr.cursor=Dr.limit-e,!0):(Dr.cursor=Dr.limit-n,r()?(Dr.cursor=Dr.limit-n,!1):(Dr.cursor=Dr.limit-n,!(Dr.cursor<=Dr.limit_backward)&&(Dr.cursor--,!!i()&&(Dr.cursor=Dr.limit-n,!0))))}function u(r){return t(r,function(){return Dr.in_grouping_b(Wr,97,305)})}function o(){return u(function(){return Dr.eq_s_b(1,"n")})}function s(){return u(function(){return Dr.eq_s_b(1,"s")})}function c(){return u(function(){return Dr.eq_s_b(1,"y")})}function l(){return t(function(){return Dr.in_grouping_b(Lr,105,305)},function(){return Dr.out_grouping_b(Wr,97,305)})}function a(){return Dr.find_among_b(ur,10)&&l()}function m(){return n()&&Dr.in_grouping_b(Lr,105,305)&&s()}function d(){return Dr.find_among_b(or,2)}function f(){return n()&&Dr.in_grouping_b(Lr,105,305)&&c()}function b(){return n()&&Dr.find_among_b(sr,4)}function w(){return n()&&Dr.find_among_b(cr,4)&&o()}function _(){return n()&&Dr.find_among_b(lr,2)&&c()}function k(){return n()&&Dr.find_among_b(ar,2)}function p(){return n()&&Dr.find_among_b(mr,4)}function g(){return n()&&Dr.find_among_b(dr,2)}function y(){return n()&&Dr.find_among_b(fr,4)}function z(){return n()&&Dr.find_among_b(br,2)}function v(){return n()&&Dr.find_among_b(wr,2)&&c()}function h(){return Dr.eq_s_b(2,"ki")}function q(){return n()&&Dr.find_among_b(_r,2)&&o()}function C(){return n()&&Dr.find_among_b(kr,4)&&c()}function P(){return n()&&Dr.find_among_b(pr,4)}function F(){return n()&&Dr.find_among_b(gr,4)&&c()}function S(){return Dr.find_among_b(yr,4)}function W(){return n()&&Dr.find_among_b(zr,2)}function L(){return n()&&Dr.find_among_b(vr,4)}function x(){return n()&&Dr.find_among_b(hr,8)}function A(){return Dr.find_among_b(qr,2)}function E(){return n()&&Dr.find_among_b(Cr,32)&&c()}function j(){return Dr.find_among_b(Pr,8)&&c()}function T(){return n()&&Dr.find_among_b(Fr,4)&&c()}function Z(){return Dr.eq_s_b(3,"ken")&&c()}function B(){var r=Dr.limit-Dr.cursor;return!(T()||(Dr.cursor=Dr.limit-r,E()||(Dr.cursor=Dr.limit-r,j()||(Dr.cursor=Dr.limit-r,Z()))))}function D(){if(A()){var r=Dr.limit-Dr.cursor;if(S()||(Dr.cursor=Dr.limit-r,W()||(Dr.cursor=Dr.limit-r,C()||(Dr.cursor=Dr.limit-r,P()||(Dr.cursor=Dr.limit-r,F()||(Dr.cursor=Dr.limit-r))))),T())return!1}return!0}function G(){if(W()){Dr.bra=Dr.cursor,Dr.slice_del();var r=Dr.limit-Dr.cursor;return Dr.ket=Dr.cursor,x()||(Dr.cursor=Dr.limit-r,E()||(Dr.cursor=Dr.limit-r,j()||(Dr.cursor=Dr.limit-r,T()||(Dr.cursor=Dr.limit-r)))),nr=!1,!1}return!0}function H(){if(!L())return!0;var 
r=Dr.limit-Dr.cursor;return!E()&&(Dr.cursor=Dr.limit-r,!j())}function I(){var r,i=Dr.limit-Dr.cursor;return!(S()||(Dr.cursor=Dr.limit-i,F()||(Dr.cursor=Dr.limit-i,P()||(Dr.cursor=Dr.limit-i,C()))))||(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,T()||(Dr.cursor=Dr.limit-r),!1)}function J(){var r,i=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,nr=!0,B()&&(Dr.cursor=Dr.limit-i,D()&&(Dr.cursor=Dr.limit-i,G()&&(Dr.cursor=Dr.limit-i,H()&&(Dr.cursor=Dr.limit-i,I()))))){if(Dr.cursor=Dr.limit-i,!x())return;Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,r=Dr.limit-Dr.cursor,S()||(Dr.cursor=Dr.limit-r,W()||(Dr.cursor=Dr.limit-r,C()||(Dr.cursor=Dr.limit-r,P()||(Dr.cursor=Dr.limit-r,F()||(Dr.cursor=Dr.limit-r))))),T()||(Dr.cursor=Dr.limit-r)}Dr.bra=Dr.cursor,Dr.slice_del()}function K(){var r,i,e,n;if(Dr.ket=Dr.cursor,h()){if(r=Dr.limit-Dr.cursor,p())return Dr.bra=Dr.cursor,Dr.slice_del(),i=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,W()?(Dr.bra=Dr.cursor,Dr.slice_del(),K()):(Dr.cursor=Dr.limit-i,a()&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()))),!0;if(Dr.cursor=Dr.limit-r,w()){if(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,e=Dr.limit-Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else{if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,!a()&&(Dr.cursor=Dr.limit-e,!m()&&(Dr.cursor=Dr.limit-e,!K())))return!0;Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())}return!0}if(Dr.cursor=Dr.limit-r,g()){if(n=Dr.limit-Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else if(Dr.cursor=Dr.limit-n,m())Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K());else if(Dr.cursor=Dr.limit-n,!K())return!1;return!0}}return!1}function M(r){if(Dr.ket=Dr.cursor,!g()&&(Dr.cursor=Dr.limit-r,!k()))return!1;var i=Dr.limit-Dr.cursor;if(d())Dr.bra=Dr.cursor,Dr.slice_del();else if(Dr.cursor=Dr.limit-i,m())Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K());else if(Dr.cursor=Dr.limit-i,!K())return!1;return!0}function N(r){if(Dr.ket=Dr.cursor,!z()&&(Dr.cursor=Dr.limit-r,!b()))return!1;var i=Dr.limit-Dr.cursor;return!(!m()&&(Dr.cursor=Dr.limit-i,!d()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()),!0)}function O(){var r,i=Dr.limit-Dr.cursor;return Dr.ket=Dr.cursor,!(!w()&&(Dr.cursor=Dr.limit-i,!v()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,!(!W()||(Dr.bra=Dr.cursor,Dr.slice_del(),!K()))||(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!(a()||(Dr.cursor=Dr.limit-r,m()||(Dr.cursor=Dr.limit-r,K())))||(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()),!0)))}function Q(){var r,i,e=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,!p()&&(Dr.cursor=Dr.limit-e,!f()&&(Dr.cursor=Dr.limit-e,!_())))return!1;if(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,r=Dr.limit-Dr.cursor,a())Dr.bra=Dr.cursor,Dr.slice_del(),i=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,W()||(Dr.cursor=Dr.limit-i);else if(Dr.cursor=Dr.limit-r,!W())return!0;return Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,K(),!0}function R(){var r,i,e=Dr.limit-Dr.cursor;if(Dr.ket=Dr.cursor,W())return Dr.bra=Dr.cursor,Dr.slice_del(),void 
K();if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,q())if(Dr.bra=Dr.cursor,Dr.slice_del(),r=Dr.limit-Dr.cursor,Dr.ket=Dr.cursor,d())Dr.bra=Dr.cursor,Dr.slice_del();else{if(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!a()&&(Dr.cursor=Dr.limit-r,!m())){if(Dr.cursor=Dr.limit-r,Dr.ket=Dr.cursor,!W())return;if(Dr.bra=Dr.cursor,Dr.slice_del(),!K())return}Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())}else if(Dr.cursor=Dr.limit-e,!M(e)&&(Dr.cursor=Dr.limit-e,!N(e))){if(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,y())return Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,i=Dr.limit-Dr.cursor,void(a()?(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K())):(Dr.cursor=Dr.limit-i,W()?(Dr.bra=Dr.cursor,Dr.slice_del(),K()):(Dr.cursor=Dr.limit-i,K())));if(Dr.cursor=Dr.limit-e,!O()){if(Dr.cursor=Dr.limit-e,d())return Dr.bra=Dr.cursor,void Dr.slice_del();Dr.cursor=Dr.limit-e,K()||(Dr.cursor=Dr.limit-e,Q()||(Dr.cursor=Dr.limit-e,Dr.ket=Dr.cursor,(a()||(Dr.cursor=Dr.limit-e,m()))&&(Dr.bra=Dr.cursor,Dr.slice_del(),Dr.ket=Dr.cursor,W()&&(Dr.bra=Dr.cursor,Dr.slice_del(),K()))))}}}function U(){var r;if(Dr.ket=Dr.cursor,r=Dr.find_among_b(Sr,4))switch(Dr.bra=Dr.cursor,r){case 1:Dr.slice_from("p");break;case 2:Dr.slice_from("ç");break;case 3:Dr.slice_from("t");break;case 4:Dr.slice_from("k")}}function V(){for(;;){var r=Dr.limit-Dr.cursor;if(Dr.in_grouping_b(Wr,97,305)){Dr.cursor=Dr.limit-r;break}if(Dr.cursor=Dr.limit-r,Dr.cursor<=Dr.limit_backward)return!1;Dr.cursor--}return!0}function X(r,i,e){if(Dr.cursor=Dr.limit-r,V()){var n=Dr.limit-Dr.cursor;if(!Dr.eq_s_b(1,i)&&(Dr.cursor=Dr.limit-n,!Dr.eq_s_b(1,e)))return!0;Dr.cursor=Dr.limit-r;var t=Dr.cursor;return Dr.insert(Dr.cursor,Dr.cursor,e),Dr.cursor=t,!1}return!0}function Y(){var r=Dr.limit-Dr.cursor;(Dr.eq_s_b(1,"d")||(Dr.cursor=Dr.limit-r,Dr.eq_s_b(1,"g")))&&X(r,"a","ı")&&X(r,"e","i")&&X(r,"o","u")&&X(r,"ö","ü")}function $(){for(var r,i=Dr.cursor,e=2;;){for(r=Dr.cursor;!Dr.in_grouping(Wr,97,305);){if(Dr.cursor>=Dr.limit)return Dr.cursor=r,!(e>0)&&(Dr.cursor=i,!0);Dr.cursor++}e--}}function rr(r,i,e){for(;!Dr.eq_s(i,e);){if(Dr.cursor>=Dr.limit)return!0;Dr.cursor++}return(tr=i)!=Dr.limit||(Dr.cursor=r,!1)}function ir(){var r=Dr.cursor;return!rr(r,2,"ad")||(Dr.cursor=r,!rr(r,5,"soyad"))}function er(){var r=Dr.cursor;return!ir()&&(Dr.limit_backward=r,Dr.cursor=Dr.limit,Y(),Dr.cursor=Dr.limit,U(),!0)}var nr,tr,ur=[new i("m",-1,-1),new i("n",-1,-1),new i("miz",-1,-1),new i("niz",-1,-1),new i("muz",-1,-1),new i("nuz",-1,-1),new i("müz",-1,-1),new i("nüz",-1,-1),new i("mız",-1,-1),new i("nız",-1,-1)],or=[new i("leri",-1,-1),new i("ları",-1,-1)],sr=[new i("ni",-1,-1),new i("nu",-1,-1),new i("nü",-1,-1),new i("nı",-1,-1)],cr=[new i("in",-1,-1),new i("un",-1,-1),new i("ün",-1,-1),new i("ın",-1,-1)],lr=[new i("a",-1,-1),new i("e",-1,-1)],ar=[new i("na",-1,-1),new i("ne",-1,-1)],mr=[new i("da",-1,-1),new i("ta",-1,-1),new i("de",-1,-1),new i("te",-1,-1)],dr=[new i("nda",-1,-1),new i("nde",-1,-1)],fr=[new i("dan",-1,-1),new i("tan",-1,-1),new i("den",-1,-1),new i("ten",-1,-1)],br=[new i("ndan",-1,-1),new i("nden",-1,-1)],wr=[new i("la",-1,-1),new i("le",-1,-1)],_r=[new i("ca",-1,-1),new i("ce",-1,-1)],kr=[new i("im",-1,-1),new i("um",-1,-1),new i("üm",-1,-1),new i("ım",-1,-1)],pr=[new i("sin",-1,-1),new i("sun",-1,-1),new i("sün",-1,-1),new i("sın",-1,-1)],gr=[new i("iz",-1,-1),new i("uz",-1,-1),new i("üz",-1,-1),new i("ız",-1,-1)],yr=[new i("siniz",-1,-1),new i("sunuz",-1,-1),new i("sünüz",-1,-1),new 
i("sınız",-1,-1)],zr=[new i("lar",-1,-1),new i("ler",-1,-1)],vr=[new i("niz",-1,-1),new i("nuz",-1,-1),new i("nüz",-1,-1),new i("nız",-1,-1)],hr=[new i("dir",-1,-1),new i("tir",-1,-1),new i("dur",-1,-1),new i("tur",-1,-1),new i("dür",-1,-1),new i("tür",-1,-1),new i("dır",-1,-1),new i("tır",-1,-1)],qr=[new i("casına",-1,-1),new i("cesine",-1,-1)],Cr=[new i("di",-1,-1),new i("ti",-1,-1),new i("dik",-1,-1),new i("tik",-1,-1),new i("duk",-1,-1),new i("tuk",-1,-1),new i("dük",-1,-1),new i("tük",-1,-1),new i("dık",-1,-1),new i("tık",-1,-1),new i("dim",-1,-1),new i("tim",-1,-1),new i("dum",-1,-1),new i("tum",-1,-1),new i("düm",-1,-1),new i("tüm",-1,-1),new i("dım",-1,-1),new i("tım",-1,-1),new i("din",-1,-1),new i("tin",-1,-1),new i("dun",-1,-1),new i("tun",-1,-1),new i("dün",-1,-1),new i("tün",-1,-1),new i("dın",-1,-1),new i("tın",-1,-1),new i("du",-1,-1),new i("tu",-1,-1),new i("dü",-1,-1),new i("tü",-1,-1),new i("dı",-1,-1),new i("tı",-1,-1)],Pr=[new i("sa",-1,-1),new i("se",-1,-1),new i("sak",-1,-1),new i("sek",-1,-1),new i("sam",-1,-1),new i("sem",-1,-1),new i("san",-1,-1),new i("sen",-1,-1)],Fr=[new i("miş",-1,-1),new i("muş",-1,-1),new i("müş",-1,-1),new i("mış",-1,-1)],Sr=[new i("b",-1,1),new i("c",-1,2),new i("d",-1,3),new i("ğ",-1,4)],Wr=[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,8,0,0,0,0,0,0,1],Lr=[1,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,1],xr=[1,64,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],Ar=[17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,130],Er=[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1],jr=[17],Tr=[65],Zr=[65],Br=[["a",xr,97,305],["e",Ar,101,252],["ı",Er,97,305],["i",jr,101,105],["o",Tr,111,117],["ö",Zr,246,252],["u",Tr,111,117]],Dr=new e;this.setCurrent=function(r){Dr.setCurrent(r)},this.getCurrent=function(){return Dr.getCurrent()},this.stem=function(){return!!($()&&(Dr.limit_backward=Dr.cursor,Dr.cursor=Dr.limit,J(),Dr.cursor=Dr.limit,nr&&(R(),Dr.cursor=Dr.limit_backward,er())))}};return function(r){return"function"==typeof r.update?r.update(function(r){return n.setCurrent(r),n.stem(),n.getCurrent()}):(n.setCurrent(r),n.stem(),n.getCurrent())}}(),r.Pipeline.registerFunction(r.tr.stemmer,"stemmer-tr"),r.tr.stopWordFilter=r.generateStopWordFilter("acaba altmış altı ama ancak arada aslında ayrıca bana bazı belki ben benden beni benim beri beş bile bin bir biri birkaç birkez birçok birşey birşeyi biz bizden bize bizi bizim bu buna bunda bundan bunlar bunları bunların bunu bunun burada böyle böylece da daha dahi de defa değil diye diğer doksan dokuz dolayı dolayısıyla dört edecek eden ederek edilecek ediliyor edilmesi ediyor elli en etmesi etti ettiği ettiğini eğer gibi göre halen hangi hatta hem henüz hep hepsi her herhangi herkesin hiç hiçbir iki ile ilgili ise itibaren itibariyle için işte kadar karşın katrilyon kendi kendilerine kendini kendisi kendisine kendisini kez ki kim kimden kime kimi kimse kırk milyar milyon mu mü mı nasıl ne neden nedenle nerde nerede nereye niye niçin o olan olarak oldu olduklarını olduğu olduğunu olmadı olmadığı olmak olması olmayan olmaz olsa olsun olup olur olursa oluyor on ona ondan onlar onlardan onları onların onu onun otuz oysa pek rağmen sadece sanki sekiz seksen sen senden seni senin siz sizden sizi sizin tarafından trilyon tüm var vardı ve veya ya yani yapacak yapmak yaptı yaptıkları yaptığı yaptığını yapılan yapılması yapıyor yedi yerine yetmiş yine yirmi yoksa yüz zaten çok çünkü öyle üzere üç şey şeyden şeyi şeyler şu şuna şunda şundan şunları şunu şöyle".split(" 
")),r.Pipeline.registerFunction(r.tr.stopWordFilter,"stopWordFilter-tr")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.vi.min.js b/assets/javascripts/lunr/min/lunr.vi.min.js new file mode 100644 index 000000000..22aed28c4 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.vi.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r():r()(e.lunr)}(this,function(){return function(e){if(void 0===e)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===e.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");e.vi=function(){this.pipeline.reset(),this.pipeline.add(e.vi.stopWordFilter,e.vi.trimmer)},e.vi.wordCharacters="[A-Za-ẓ̀͐́͑̉̃̓ÂâÊêÔôĂ-ăĐ-đƠ-ơƯ-ư]",e.vi.trimmer=e.trimmerSupport.generateTrimmer(e.vi.wordCharacters),e.Pipeline.registerFunction(e.vi.trimmer,"trimmer-vi"),e.vi.stopWordFilter=e.generateStopWordFilter("là cái nhưng mà".split(" "))}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/min/lunr.zh.min.js b/assets/javascripts/lunr/min/lunr.zh.min.js new file mode 100644 index 000000000..fda66e9c5 --- /dev/null +++ b/assets/javascripts/lunr/min/lunr.zh.min.js @@ -0,0 +1 @@ +!function(e,r){"function"==typeof define&&define.amd?define(r):"object"==typeof exports?module.exports=r(require("@node-rs/jieba")):r()(e.lunr)}(this,function(e){return function(r,t){if(void 0===r)throw new Error("Lunr is not present. Please include / require Lunr before this script.");if(void 0===r.stemmerSupport)throw new Error("Lunr stemmer support is not present. Please include / require Lunr stemmer support before this script.");var i="2"==r.version[0];r.zh=function(){this.pipeline.reset(),this.pipeline.add(r.zh.trimmer,r.zh.stopWordFilter,r.zh.stemmer),i?this.tokenizer=r.zh.tokenizer:(r.tokenizer&&(r.tokenizer=r.zh.tokenizer),this.tokenizerFn&&(this.tokenizerFn=r.zh.tokenizer))},r.zh.tokenizer=function(n){if(!arguments.length||null==n||void 0==n)return[];if(Array.isArray(n))return n.map(function(e){return i?new r.Token(e.toLowerCase()):e.toLowerCase()});t&&e.load(t);var o=n.toString().trim().toLowerCase(),s=[];e.cut(o,!0).forEach(function(e){s=s.concat(e.split(" "))}),s=s.filter(function(e){return!!e});var u=0;return s.map(function(e,t){if(i){var n=o.indexOf(e,u),s={};return s.position=[n,e.length],s.index=t,u=n,new r.Token(e,s)}return e})},r.zh.wordCharacters="\\w一-龥",r.zh.trimmer=r.trimmerSupport.generateTrimmer(r.zh.wordCharacters),r.Pipeline.registerFunction(r.zh.trimmer,"trimmer-zh"),r.zh.stemmer=function(){return function(e){return e}}(),r.Pipeline.registerFunction(r.zh.stemmer,"stemmer-zh"),r.zh.stopWordFilter=r.generateStopWordFilter("的 一 不 在 人 有 是 为 為 以 于 於 上 他 而 后 後 之 来 來 及 了 因 下 可 到 由 这 這 与 與 也 此 但 并 並 个 個 其 已 无 無 小 我 们 們 起 最 再 今 去 好 只 又 或 很 亦 某 把 那 你 乃 它 吧 被 比 别 趁 当 當 从 從 得 打 凡 儿 兒 尔 爾 该 該 各 给 給 跟 和 何 还 還 即 几 幾 既 看 据 據 距 靠 啦 另 么 麽 每 嘛 拿 哪 您 凭 憑 且 却 卻 让 讓 仍 啥 如 若 使 谁 誰 虽 雖 随 隨 同 所 她 哇 嗡 往 些 向 沿 哟 喲 用 咱 则 則 怎 曾 至 致 着 著 诸 諸 自".split(" ")),r.Pipeline.registerFunction(r.zh.stopWordFilter,"stopWordFilter-zh")}}); \ No newline at end of file diff --git a/assets/javascripts/lunr/tinyseg.js b/assets/javascripts/lunr/tinyseg.js new file mode 100644 index 000000000..167fa6dd6 --- /dev/null +++ b/assets/javascripts/lunr/tinyseg.js @@ -0,0 +1,206 @@ +/** + * export the module via AMD, CommonJS or as a browser global + * Export code from 
https://github.com/umdjs/umd/blob/master/returnExports.js + */ +;(function (root, factory) { + if (typeof define === 'function' && define.amd) { + // AMD. Register as an anonymous module. + define(factory) + } else if (typeof exports === 'object') { + /** + * Node. Does not work with strict CommonJS, but + * only CommonJS-like environments that support module.exports, + * like Node. + */ + module.exports = factory() + } else { + // Browser globals (root is window) + factory()(root.lunr); + } +}(this, function () { + /** + * Just return a value to define the module export. + * This example returns an object, but the module + * can return a function as the exported value. + */ + + return function(lunr) { + // TinySegmenter 0.1 -- Super compact Japanese tokenizer in Javascript + // (c) 2008 Taku Kudo + // TinySegmenter is freely distributable under the terms of a new BSD licence. + // For details, see http://chasen.org/~taku/software/TinySegmenter/LICENCE.txt + + function TinySegmenter() { + var patterns = { + "[一二三四五六七八九十百千万億兆]":"M", + "[一-龠々〆ヵヶ]":"H", + "[ぁ-ん]":"I", + "[ァ-ヴーア-ン゙ー]":"K", + "[a-zA-Za-zA-Z]":"A", + "[0-90-9]":"N" + } + this.chartype_ = []; + for (var i in patterns) { + var regexp = new RegExp(i); + this.chartype_.push([regexp, patterns[i]]); + } + + this.BIAS__ = -332 + this.BC1__ = {"HH":6,"II":2461,"KH":406,"OH":-1378}; + this.BC2__ = {"AA":-3267,"AI":2744,"AN":-878,"HH":-4070,"HM":-1711,"HN":4012,"HO":3761,"IA":1327,"IH":-1184,"II":-1332,"IK":1721,"IO":5492,"KI":3831,"KK":-8741,"MH":-3132,"MK":3334,"OO":-2920}; + this.BC3__ = {"HH":996,"HI":626,"HK":-721,"HN":-1307,"HO":-836,"IH":-301,"KK":2762,"MK":1079,"MM":4034,"OA":-1652,"OH":266}; + this.BP1__ = {"BB":295,"OB":304,"OO":-125,"UB":352}; + this.BP2__ = {"BO":60,"OO":-1762}; + this.BQ1__ = {"BHH":1150,"BHM":1521,"BII":-1158,"BIM":886,"BMH":1208,"BNH":449,"BOH":-91,"BOO":-2597,"OHI":451,"OIH":-296,"OKA":1851,"OKH":-1020,"OKK":904,"OOO":2965}; + this.BQ2__ = {"BHH":118,"BHI":-1159,"BHM":466,"BIH":-919,"BKK":-1720,"BKO":864,"OHH":-1139,"OHM":-181,"OIH":153,"UHI":-1146}; + this.BQ3__ = {"BHH":-792,"BHI":2664,"BII":-299,"BKI":419,"BMH":937,"BMM":8335,"BNN":998,"BOH":775,"OHH":2174,"OHM":439,"OII":280,"OKH":1798,"OKI":-793,"OKO":-2242,"OMH":-2402,"OOO":11699}; + this.BQ4__ = {"BHH":-3895,"BIH":3761,"BII":-4654,"BIK":1348,"BKK":-1806,"BMI":-3385,"BOO":-12396,"OAH":926,"OHH":266,"OHK":-2036,"ONN":-973}; + this.BW1__ = {",と":660,",同":727,"B1あ":1404,"B1同":542,"、と":660,"、同":727,"」と":1682,"あっ":1505,"いう":1743,"いっ":-2055,"いる":672,"うし":-4817,"うん":665,"から":3472,"がら":600,"こう":-790,"こと":2083,"こん":-1262,"さら":-4143,"さん":4573,"した":2641,"して":1104,"すで":-3399,"そこ":1977,"それ":-871,"たち":1122,"ため":601,"った":3463,"つい":-802,"てい":805,"てき":1249,"でき":1127,"です":3445,"では":844,"とい":-4915,"とみ":1922,"どこ":3887,"ない":5713,"なっ":3015,"など":7379,"なん":-1113,"にし":2468,"には":1498,"にも":1671,"に対":-912,"の一":-501,"の中":741,"ませ":2448,"まで":1711,"まま":2600,"まる":-2155,"やむ":-1947,"よっ":-2565,"れた":2369,"れで":-913,"をし":1860,"を見":731,"亡く":-1886,"京都":2558,"取り":-2784,"大き":-2604,"大阪":1497,"平方":-2314,"引き":-1336,"日本":-195,"本当":-2423,"毎日":-2113,"目指":-724,"B1あ":1404,"B1同":542,"」と":1682}; + this.BW2__ = 
{"..":-11822,"11":-669,"――":-5730,"−−":-13175,"いう":-1609,"うか":2490,"かし":-1350,"かも":-602,"から":-7194,"かれ":4612,"がい":853,"がら":-3198,"きた":1941,"くな":-1597,"こと":-8392,"この":-4193,"させ":4533,"され":13168,"さん":-3977,"しい":-1819,"しか":-545,"した":5078,"して":972,"しな":939,"その":-3744,"たい":-1253,"たた":-662,"ただ":-3857,"たち":-786,"たと":1224,"たは":-939,"った":4589,"って":1647,"っと":-2094,"てい":6144,"てき":3640,"てく":2551,"ては":-3110,"ても":-3065,"でい":2666,"でき":-1528,"でし":-3828,"です":-4761,"でも":-4203,"とい":1890,"とこ":-1746,"とと":-2279,"との":720,"とみ":5168,"とも":-3941,"ない":-2488,"なが":-1313,"など":-6509,"なの":2614,"なん":3099,"にお":-1615,"にし":2748,"にな":2454,"によ":-7236,"に対":-14943,"に従":-4688,"に関":-11388,"のか":2093,"ので":-7059,"のに":-6041,"のの":-6125,"はい":1073,"はが":-1033,"はず":-2532,"ばれ":1813,"まし":-1316,"まで":-6621,"まれ":5409,"めて":-3153,"もい":2230,"もの":-10713,"らか":-944,"らし":-1611,"らに":-1897,"りし":651,"りま":1620,"れた":4270,"れて":849,"れば":4114,"ろう":6067,"われ":7901,"を通":-11877,"んだ":728,"んな":-4115,"一人":602,"一方":-1375,"一日":970,"一部":-1051,"上が":-4479,"会社":-1116,"出て":2163,"分の":-7758,"同党":970,"同日":-913,"大阪":-2471,"委員":-1250,"少な":-1050,"年度":-8669,"年間":-1626,"府県":-2363,"手権":-1982,"新聞":-4066,"日新":-722,"日本":-7068,"日米":3372,"曜日":-601,"朝鮮":-2355,"本人":-2697,"東京":-1543,"然と":-1384,"社会":-1276,"立て":-990,"第に":-1612,"米国":-4268,"11":-669}; + this.BW3__ = {"あた":-2194,"あり":719,"ある":3846,"い.":-1185,"い。":-1185,"いい":5308,"いえ":2079,"いく":3029,"いた":2056,"いっ":1883,"いる":5600,"いわ":1527,"うち":1117,"うと":4798,"えと":1454,"か.":2857,"か。":2857,"かけ":-743,"かっ":-4098,"かに":-669,"から":6520,"かり":-2670,"が,":1816,"が、":1816,"がき":-4855,"がけ":-1127,"がっ":-913,"がら":-4977,"がり":-2064,"きた":1645,"けど":1374,"こと":7397,"この":1542,"ころ":-2757,"さい":-714,"さを":976,"し,":1557,"し、":1557,"しい":-3714,"した":3562,"して":1449,"しな":2608,"しま":1200,"す.":-1310,"す。":-1310,"する":6521,"ず,":3426,"ず、":3426,"ずに":841,"そう":428,"た.":8875,"た。":8875,"たい":-594,"たの":812,"たり":-1183,"たる":-853,"だ.":4098,"だ。":4098,"だっ":1004,"った":-4748,"って":300,"てい":6240,"てお":855,"ても":302,"です":1437,"でに":-1482,"では":2295,"とう":-1387,"とし":2266,"との":541,"とも":-3543,"どう":4664,"ない":1796,"なく":-903,"など":2135,"に,":-1021,"に、":-1021,"にし":1771,"にな":1906,"には":2644,"の,":-724,"の、":-724,"の子":-1000,"は,":1337,"は、":1337,"べき":2181,"まし":1113,"ます":6943,"まっ":-1549,"まで":6154,"まれ":-793,"らし":1479,"られ":6820,"るる":3818,"れ,":854,"れ、":854,"れた":1850,"れて":1375,"れば":-3246,"れる":1091,"われ":-605,"んだ":606,"んで":798,"カ月":990,"会議":860,"入り":1232,"大会":2217,"始め":1681,"市":965,"新聞":-5055,"日,":974,"日、":974,"社会":2024,"カ月":990}; + this.TC1__ = {"AAA":1093,"HHH":1029,"HHM":580,"HII":998,"HOH":-390,"HOM":-331,"IHI":1169,"IOH":-142,"IOI":-1015,"IOM":467,"MMH":187,"OOI":-1832}; + this.TC2__ = {"HHO":2088,"HII":-1023,"HMM":-1154,"IHI":-1965,"KKH":703,"OII":-2649}; + this.TC3__ = {"AAA":-294,"HHH":346,"HHI":-341,"HII":-1088,"HIK":731,"HOH":-1486,"IHH":128,"IHI":-3041,"IHO":-1935,"IIH":-825,"IIM":-1035,"IOI":-542,"KHH":-1216,"KKA":491,"KKH":-1217,"KOK":-1009,"MHH":-2694,"MHM":-457,"MHO":123,"MMH":-471,"NNH":-1689,"NNO":662,"OHO":-3393}; + this.TC4__ = {"HHH":-203,"HHI":1344,"HHK":365,"HHM":-122,"HHN":182,"HHO":669,"HIH":804,"HII":679,"HOH":446,"IHH":695,"IHO":-2324,"IIH":321,"III":1497,"IIO":656,"IOO":54,"KAK":4845,"KKA":3386,"KKK":3065,"MHH":-405,"MHI":201,"MMH":-241,"MMM":661,"MOM":841}; + this.TQ1__ = {"BHHH":-227,"BHHI":316,"BHIH":-132,"BIHH":60,"BIII":1595,"BNHH":-744,"BOHH":225,"BOOO":-908,"OAKK":482,"OHHH":281,"OHIH":249,"OIHI":200,"OIIH":-68}; + this.TQ2__ = {"BIHH":-1401,"BIII":-1033,"BKAK":-543,"BOOO":-5591}; + this.TQ3__ = 
{"BHHH":478,"BHHM":-1073,"BHIH":222,"BHII":-504,"BIIH":-116,"BIII":-105,"BMHI":-863,"BMHM":-464,"BOMH":620,"OHHH":346,"OHHI":1729,"OHII":997,"OHMH":481,"OIHH":623,"OIIH":1344,"OKAK":2792,"OKHH":587,"OKKA":679,"OOHH":110,"OOII":-685}; + this.TQ4__ = {"BHHH":-721,"BHHM":-3604,"BHII":-966,"BIIH":-607,"BIII":-2181,"OAAA":-2763,"OAKK":180,"OHHH":-294,"OHHI":2446,"OHHO":480,"OHIH":-1573,"OIHH":1935,"OIHI":-493,"OIIH":626,"OIII":-4007,"OKAK":-8156}; + this.TW1__ = {"につい":-4681,"東京都":2026}; + this.TW2__ = {"ある程":-2049,"いった":-1256,"ころが":-2434,"しょう":3873,"その後":-4430,"だって":-1049,"ていた":1833,"として":-4657,"ともに":-4517,"もので":1882,"一気に":-792,"初めて":-1512,"同時に":-8097,"大きな":-1255,"対して":-2721,"社会党":-3216}; + this.TW3__ = {"いただ":-1734,"してい":1314,"として":-4314,"につい":-5483,"にとっ":-5989,"に当た":-6247,"ので,":-727,"ので、":-727,"のもの":-600,"れから":-3752,"十二月":-2287}; + this.TW4__ = {"いう.":8576,"いう。":8576,"からな":-2348,"してい":2958,"たが,":1516,"たが、":1516,"ている":1538,"という":1349,"ました":5543,"ません":1097,"ようと":-4258,"よると":5865}; + this.UC1__ = {"A":484,"K":93,"M":645,"O":-505}; + this.UC2__ = {"A":819,"H":1059,"I":409,"M":3987,"N":5775,"O":646}; + this.UC3__ = {"A":-1370,"I":2311}; + this.UC4__ = {"A":-2643,"H":1809,"I":-1032,"K":-3450,"M":3565,"N":3876,"O":6646}; + this.UC5__ = {"H":313,"I":-1238,"K":-799,"M":539,"O":-831}; + this.UC6__ = {"H":-506,"I":-253,"K":87,"M":247,"O":-387}; + this.UP1__ = {"O":-214}; + this.UP2__ = {"B":69,"O":935}; + this.UP3__ = {"B":189}; + this.UQ1__ = {"BH":21,"BI":-12,"BK":-99,"BN":142,"BO":-56,"OH":-95,"OI":477,"OK":410,"OO":-2422}; + this.UQ2__ = {"BH":216,"BI":113,"OK":1759}; + this.UQ3__ = {"BA":-479,"BH":42,"BI":1913,"BK":-7198,"BM":3160,"BN":6427,"BO":14761,"OI":-827,"ON":-3212}; + this.UW1__ = {",":156,"、":156,"「":-463,"あ":-941,"う":-127,"が":-553,"き":121,"こ":505,"で":-201,"と":-547,"ど":-123,"に":-789,"の":-185,"は":-847,"も":-466,"や":-470,"よ":182,"ら":-292,"り":208,"れ":169,"を":-446,"ん":-137,"・":-135,"主":-402,"京":-268,"区":-912,"午":871,"国":-460,"大":561,"委":729,"市":-411,"日":-141,"理":361,"生":-408,"県":-386,"都":-718,"「":-463,"・":-135}; + this.UW2__ = {",":-829,"、":-829,"〇":892,"「":-645,"」":3145,"あ":-538,"い":505,"う":134,"お":-502,"か":1454,"が":-856,"く":-412,"こ":1141,"さ":878,"ざ":540,"し":1529,"す":-675,"せ":300,"そ":-1011,"た":188,"だ":1837,"つ":-949,"て":-291,"で":-268,"と":-981,"ど":1273,"な":1063,"に":-1764,"の":130,"は":-409,"ひ":-1273,"べ":1261,"ま":600,"も":-1263,"や":-402,"よ":1639,"り":-579,"る":-694,"れ":571,"を":-2516,"ん":2095,"ア":-587,"カ":306,"キ":568,"ッ":831,"三":-758,"不":-2150,"世":-302,"中":-968,"主":-861,"事":492,"人":-123,"会":978,"保":362,"入":548,"初":-3025,"副":-1566,"北":-3414,"区":-422,"大":-1769,"天":-865,"太":-483,"子":-1519,"学":760,"実":1023,"小":-2009,"市":-813,"年":-1060,"強":1067,"手":-1519,"揺":-1033,"政":1522,"文":-1355,"新":-1682,"日":-1815,"明":-1462,"最":-630,"朝":-1843,"本":-1650,"東":-931,"果":-665,"次":-2378,"民":-180,"気":-1740,"理":752,"発":529,"目":-1584,"相":-242,"県":-1165,"立":-763,"第":810,"米":509,"自":-1353,"行":838,"西":-744,"見":-3874,"調":1010,"議":1198,"込":3041,"開":1758,"間":-1257,"「":-645,"」":3145,"ッ":831,"ア":-587,"カ":306,"キ":568}; + this.UW3__ = 
{",":4889,"1":-800,"−":-1723,"、":4889,"々":-2311,"〇":5827,"」":2670,"〓":-3573,"あ":-2696,"い":1006,"う":2342,"え":1983,"お":-4864,"か":-1163,"が":3271,"く":1004,"け":388,"げ":401,"こ":-3552,"ご":-3116,"さ":-1058,"し":-395,"す":584,"せ":3685,"そ":-5228,"た":842,"ち":-521,"っ":-1444,"つ":-1081,"て":6167,"で":2318,"と":1691,"ど":-899,"な":-2788,"に":2745,"の":4056,"は":4555,"ひ":-2171,"ふ":-1798,"へ":1199,"ほ":-5516,"ま":-4384,"み":-120,"め":1205,"も":2323,"や":-788,"よ":-202,"ら":727,"り":649,"る":5905,"れ":2773,"わ":-1207,"を":6620,"ん":-518,"ア":551,"グ":1319,"ス":874,"ッ":-1350,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278,"・":-3794,"一":-1619,"下":-1759,"世":-2087,"両":3815,"中":653,"主":-758,"予":-1193,"二":974,"人":2742,"今":792,"他":1889,"以":-1368,"低":811,"何":4265,"作":-361,"保":-2439,"元":4858,"党":3593,"全":1574,"公":-3030,"六":755,"共":-1880,"円":5807,"再":3095,"分":457,"初":2475,"別":1129,"前":2286,"副":4437,"力":365,"動":-949,"務":-1872,"化":1327,"北":-1038,"区":4646,"千":-2309,"午":-783,"協":-1006,"口":483,"右":1233,"各":3588,"合":-241,"同":3906,"和":-837,"員":4513,"国":642,"型":1389,"場":1219,"外":-241,"妻":2016,"学":-1356,"安":-423,"実":-1008,"家":1078,"小":-513,"少":-3102,"州":1155,"市":3197,"平":-1804,"年":2416,"広":-1030,"府":1605,"度":1452,"建":-2352,"当":-3885,"得":1905,"思":-1291,"性":1822,"戸":-488,"指":-3973,"政":-2013,"教":-1479,"数":3222,"文":-1489,"新":1764,"日":2099,"旧":5792,"昨":-661,"時":-1248,"曜":-951,"最":-937,"月":4125,"期":360,"李":3094,"村":364,"東":-805,"核":5156,"森":2438,"業":484,"氏":2613,"民":-1694,"決":-1073,"法":1868,"海":-495,"無":979,"物":461,"特":-3850,"生":-273,"用":914,"町":1215,"的":7313,"直":-1835,"省":792,"県":6293,"知":-1528,"私":4231,"税":401,"立":-960,"第":1201,"米":7767,"系":3066,"約":3663,"級":1384,"統":-4229,"総":1163,"線":1255,"者":6457,"能":725,"自":-2869,"英":785,"見":1044,"調":-562,"財":-733,"費":1777,"車":1835,"軍":1375,"込":-1504,"通":-1136,"選":-681,"郎":1026,"郡":4404,"部":1200,"金":2163,"長":421,"開":-1432,"間":1302,"関":-1282,"雨":2009,"電":-1045,"非":2066,"駅":1620,"1":-800,"」":2670,"・":-3794,"ッ":-1350,"ア":551,"グ":1319,"ス":874,"ト":521,"ム":1109,"ル":1591,"ロ":2201,"ン":278}; + this.UW4__ = 
{",":3930,".":3508,"―":-4841,"、":3930,"。":3508,"〇":4999,"「":1895,"」":3798,"〓":-5156,"あ":4752,"い":-3435,"う":-640,"え":-2514,"お":2405,"か":530,"が":6006,"き":-4482,"ぎ":-3821,"く":-3788,"け":-4376,"げ":-4734,"こ":2255,"ご":1979,"さ":2864,"し":-843,"じ":-2506,"す":-731,"ず":1251,"せ":181,"そ":4091,"た":5034,"だ":5408,"ち":-3654,"っ":-5882,"つ":-1659,"て":3994,"で":7410,"と":4547,"な":5433,"に":6499,"ぬ":1853,"ね":1413,"の":7396,"は":8578,"ば":1940,"ひ":4249,"び":-4134,"ふ":1345,"へ":6665,"べ":-744,"ほ":1464,"ま":1051,"み":-2082,"む":-882,"め":-5046,"も":4169,"ゃ":-2666,"や":2795,"ょ":-1544,"よ":3351,"ら":-2922,"り":-9726,"る":-14896,"れ":-2613,"ろ":-4570,"わ":-1783,"を":13150,"ん":-2352,"カ":2145,"コ":1789,"セ":1287,"ッ":-724,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637,"・":-4371,"ー":-11870,"一":-2069,"中":2210,"予":782,"事":-190,"井":-1768,"人":1036,"以":544,"会":950,"体":-1286,"作":530,"側":4292,"先":601,"党":-2006,"共":-1212,"内":584,"円":788,"初":1347,"前":1623,"副":3879,"力":-302,"動":-740,"務":-2715,"化":776,"区":4517,"協":1013,"参":1555,"合":-1834,"和":-681,"員":-910,"器":-851,"回":1500,"国":-619,"園":-1200,"地":866,"場":-1410,"塁":-2094,"士":-1413,"多":1067,"大":571,"子":-4802,"学":-1397,"定":-1057,"寺":-809,"小":1910,"屋":-1328,"山":-1500,"島":-2056,"川":-2667,"市":2771,"年":374,"庁":-4556,"後":456,"性":553,"感":916,"所":-1566,"支":856,"改":787,"政":2182,"教":704,"文":522,"方":-856,"日":1798,"時":1829,"最":845,"月":-9066,"木":-485,"来":-442,"校":-360,"業":-1043,"氏":5388,"民":-2716,"気":-910,"沢":-939,"済":-543,"物":-735,"率":672,"球":-1267,"生":-1286,"産":-1101,"田":-2900,"町":1826,"的":2586,"目":922,"省":-3485,"県":2997,"空":-867,"立":-2112,"第":788,"米":2937,"系":786,"約":2171,"経":1146,"統":-1169,"総":940,"線":-994,"署":749,"者":2145,"能":-730,"般":-852,"行":-792,"規":792,"警":-1184,"議":-244,"谷":-1000,"賞":730,"車":-1481,"軍":1158,"輪":-1433,"込":-3370,"近":929,"道":-1291,"選":2596,"郎":-4866,"都":1192,"野":-1100,"銀":-2213,"長":357,"間":-2344,"院":-2297,"際":-2604,"電":-878,"領":-1659,"題":-792,"館":-1984,"首":1749,"高":2120,"「":1895,"」":3798,"・":-4371,"ッ":-724,"ー":-11870,"カ":2145,"コ":1789,"セ":1287,"ト":-403,"メ":-1635,"ラ":-881,"リ":-541,"ル":-856,"ン":-3637}; + this.UW5__ = {",":465,".":-299,"1":-514,"E2":-32768,"]":-2762,"、":465,"。":-299,"「":363,"あ":1655,"い":331,"う":-503,"え":1199,"お":527,"か":647,"が":-421,"き":1624,"ぎ":1971,"く":312,"げ":-983,"さ":-1537,"し":-1371,"す":-852,"だ":-1186,"ち":1093,"っ":52,"つ":921,"て":-18,"で":-850,"と":-127,"ど":1682,"な":-787,"に":-1224,"の":-635,"は":-578,"べ":1001,"み":502,"め":865,"ゃ":3350,"ょ":854,"り":-208,"る":429,"れ":504,"わ":419,"を":-1264,"ん":327,"イ":241,"ル":451,"ン":-343,"中":-871,"京":722,"会":-1153,"党":-654,"務":3519,"区":-901,"告":848,"員":2104,"大":-1296,"学":-548,"定":1785,"嵐":-1304,"市":-2991,"席":921,"年":1763,"思":872,"所":-814,"挙":1618,"新":-1682,"日":218,"月":-4353,"査":932,"格":1356,"機":-1508,"氏":-1347,"田":240,"町":-3912,"的":-3149,"相":1319,"省":-1052,"県":-4003,"研":-997,"社":-278,"空":-813,"統":1955,"者":-2233,"表":663,"語":-1073,"議":1219,"選":-1018,"郎":-368,"長":786,"間":1191,"題":2368,"館":-689,"1":-514,"E2":-32768,"「":363,"イ":241,"ル":451,"ン":-343}; + this.UW6__ = {",":227,".":808,"1":-270,"E1":306,"、":227,"。":808,"あ":-307,"う":189,"か":241,"が":-73,"く":-121,"こ":-200,"じ":1782,"す":383,"た":-428,"っ":573,"て":-1014,"で":101,"と":-105,"な":-253,"に":-149,"の":-417,"は":-236,"も":-206,"り":187,"る":-135,"を":195,"ル":-673,"ン":-496,"一":-277,"中":201,"件":-800,"会":624,"前":302,"区":1792,"員":-1212,"委":798,"学":-960,"市":887,"広":-695,"後":535,"業":-697,"相":753,"社":-507,"福":974,"空":-822,"者":1811,"連":463,"郎":1082,"1":-270,"E1":306,"ル":-673,"ン":-496}; + + return this; + } + TinySegmenter.prototype.ctype_ = function(str) { + for (var i in this.chartype_) { + if 
(str.match(this.chartype_[i][0])) { + return this.chartype_[i][1]; + } + } + return "O"; + } + + TinySegmenter.prototype.ts_ = function(v) { + if (v) { return v; } + return 0; + } + + TinySegmenter.prototype.segment = function(input) { + if (input == null || input == undefined || input == "") { + return []; + } + var result = []; + var seg = ["B3","B2","B1"]; + var ctype = ["O","O","O"]; + var o = input.split(""); + for (i = 0; i < o.length; ++i) { + seg.push(o[i]); + ctype.push(this.ctype_(o[i])) + } + seg.push("E1"); + seg.push("E2"); + seg.push("E3"); + ctype.push("O"); + ctype.push("O"); + ctype.push("O"); + var word = seg[3]; + var p1 = "U"; + var p2 = "U"; + var p3 = "U"; + for (var i = 4; i < seg.length - 3; ++i) { + var score = this.BIAS__; + var w1 = seg[i-3]; + var w2 = seg[i-2]; + var w3 = seg[i-1]; + var w4 = seg[i]; + var w5 = seg[i+1]; + var w6 = seg[i+2]; + var c1 = ctype[i-3]; + var c2 = ctype[i-2]; + var c3 = ctype[i-1]; + var c4 = ctype[i]; + var c5 = ctype[i+1]; + var c6 = ctype[i+2]; + score += this.ts_(this.UP1__[p1]); + score += this.ts_(this.UP2__[p2]); + score += this.ts_(this.UP3__[p3]); + score += this.ts_(this.BP1__[p1 + p2]); + score += this.ts_(this.BP2__[p2 + p3]); + score += this.ts_(this.UW1__[w1]); + score += this.ts_(this.UW2__[w2]); + score += this.ts_(this.UW3__[w3]); + score += this.ts_(this.UW4__[w4]); + score += this.ts_(this.UW5__[w5]); + score += this.ts_(this.UW6__[w6]); + score += this.ts_(this.BW1__[w2 + w3]); + score += this.ts_(this.BW2__[w3 + w4]); + score += this.ts_(this.BW3__[w4 + w5]); + score += this.ts_(this.TW1__[w1 + w2 + w3]); + score += this.ts_(this.TW2__[w2 + w3 + w4]); + score += this.ts_(this.TW3__[w3 + w4 + w5]); + score += this.ts_(this.TW4__[w4 + w5 + w6]); + score += this.ts_(this.UC1__[c1]); + score += this.ts_(this.UC2__[c2]); + score += this.ts_(this.UC3__[c3]); + score += this.ts_(this.UC4__[c4]); + score += this.ts_(this.UC5__[c5]); + score += this.ts_(this.UC6__[c6]); + score += this.ts_(this.BC1__[c2 + c3]); + score += this.ts_(this.BC2__[c3 + c4]); + score += this.ts_(this.BC3__[c4 + c5]); + score += this.ts_(this.TC1__[c1 + c2 + c3]); + score += this.ts_(this.TC2__[c2 + c3 + c4]); + score += this.ts_(this.TC3__[c3 + c4 + c5]); + score += this.ts_(this.TC4__[c4 + c5 + c6]); + // score += this.ts_(this.TC5__[c4 + c5 + c6]); + score += this.ts_(this.UQ1__[p1 + c1]); + score += this.ts_(this.UQ2__[p2 + c2]); + score += this.ts_(this.UQ3__[p3 + c3]); + score += this.ts_(this.BQ1__[p2 + c2 + c3]); + score += this.ts_(this.BQ2__[p2 + c3 + c4]); + score += this.ts_(this.BQ3__[p3 + c2 + c3]); + score += this.ts_(this.BQ4__[p3 + c3 + c4]); + score += this.ts_(this.TQ1__[p2 + c1 + c2 + c3]); + score += this.ts_(this.TQ2__[p2 + c2 + c3 + c4]); + score += this.ts_(this.TQ3__[p3 + c1 + c2 + c3]); + score += this.ts_(this.TQ4__[p3 + c2 + c3 + c4]); + var p = "O"; + if (score > 0) { + result.push(word); + word = ""; + p = "B"; + } + p1 = p2; + p2 = p3; + p3 = p; + word += seg[i]; + } + result.push(word); + + return result; + } + + lunr.TinySegmenter = TinySegmenter; + }; + +})); \ No newline at end of file diff --git a/assets/javascripts/lunr/wordcut.js b/assets/javascripts/lunr/wordcut.js new file mode 100644 index 000000000..0d898c9ed --- /dev/null +++ b/assets/javascripts/lunr/wordcut.js @@ -0,0 +1,6708 @@ +(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof 
global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}(g.lunr || (g.lunr = {})).wordcut = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o 1; + }) + this.addWords(words, false) + } + if(finalize){ + this.finalizeDict(); + } + }, + + dictSeek: function (l, r, ch, strOffset, pos) { + var ans = null; + while (l <= r) { + var m = Math.floor((l + r) / 2), + dict_item = this.dict[m], + len = dict_item.length; + if (len <= strOffset) { + l = m + 1; + } else { + var ch_ = dict_item[strOffset]; + if (ch_ < ch) { + l = m + 1; + } else if (ch_ > ch) { + r = m - 1; + } else { + ans = m; + if (pos == LEFT) { + r = m - 1; + } else { + l = m + 1; + } + } + } + } + return ans; + }, + + isFinal: function (acceptor) { + return this.dict[acceptor.l].length == acceptor.strOffset; + }, + + createAcceptor: function () { + return { + l: 0, + r: this.dict.length - 1, + strOffset: 0, + isFinal: false, + dict: this, + transit: function (ch) { + return this.dict.transit(this, ch); + }, + isError: false, + tag: "DICT", + w: 1, + type: "DICT" + }; + }, + + transit: function (acceptor, ch) { + var l = this.dictSeek(acceptor.l, + acceptor.r, + ch, + acceptor.strOffset, + LEFT); + if (l !== null) { + var r = this.dictSeek(l, + acceptor.r, + ch, + acceptor.strOffset, + RIGHT); + acceptor.l = l; + acceptor.r = r; + acceptor.strOffset++; + acceptor.isFinal = this.isFinal(acceptor); + } else { + acceptor.isError = true; + } + return acceptor; + }, + + sortuniq: function(a){ + return a.sort().filter(function(item, pos, arr){ + return !pos || item != arr[pos - 1]; + }) + }, + + flatten: function(a){ + //[[1,2],[3]] -> [1,2,3] + return [].concat.apply([], a); + } +}; +module.exports = WordcutDict; + +}).call(this,"/dist/tmp") +},{"glob":16,"path":22}],3:[function(require,module,exports){ +var WordRule = { + createAcceptor: function(tag) { + if (tag["WORD_RULE"]) + return null; + + return {strOffset: 0, + isFinal: false, + transit: function(ch) { + var lch = ch.toLowerCase(); + if (lch >= "a" && lch <= "z") { + this.isFinal = true; + this.strOffset++; + } else { + this.isError = true; + } + return this; + }, + isError: false, + tag: "WORD_RULE", + type: "WORD_RULE", + w: 1}; + } +}; + +var NumberRule = { + createAcceptor: function(tag) { + if (tag["NUMBER_RULE"]) + return null; + + return {strOffset: 0, + isFinal: false, + transit: function(ch) { + if (ch >= "0" && ch <= "9") { + this.isFinal = true; + this.strOffset++; + } else { + this.isError = true; + } + return this; + }, + isError: false, + tag: "NUMBER_RULE", + type: "NUMBER_RULE", + w: 1}; + } +}; + +var SpaceRule = { + tag: "SPACE_RULE", + createAcceptor: function(tag) { + + if (tag["SPACE_RULE"]) + return null; + + return {strOffset: 0, + isFinal: false, + transit: function(ch) { + if (ch == " " || ch == "\t" || ch == "\r" || ch == "\n" || + ch == "\u00A0" || ch=="\u2003"//nbsp and emsp + ) { + this.isFinal = true; + this.strOffset++; + } else { + this.isError = true; + } + return this; + }, + isError: false, + tag: SpaceRule.tag, + w: 1, + type: "SPACE_RULE"}; + } +} + +var SingleSymbolRule = { + tag: "SINSYM", + createAcceptor: 
function(tag) { + return {strOffset: 0, + isFinal: false, + transit: function(ch) { + if (this.strOffset == 0 && ch.match(/^[\@\(\)\/\,\-\."`]$/)) { + this.isFinal = true; + this.strOffset++; + } else { + this.isError = true; + } + return this; + }, + isError: false, + tag: "SINSYM", + w: 1, + type: "SINSYM"}; + } +} + + +var LatinRules = [WordRule, SpaceRule, SingleSymbolRule, NumberRule]; + +module.exports = LatinRules; + +},{}],4:[function(require,module,exports){ +var _ = require("underscore") + , WordcutCore = require("./wordcut_core"); +var PathInfoBuilder = { + + /* + buildByPartAcceptors: function(path, acceptors, i) { + var + var genInfos = partAcceptors.reduce(function(genInfos, acceptor) { + + }, []); + + return genInfos; + } + */ + + buildByAcceptors: function(path, finalAcceptors, i) { + var self = this; + var infos = finalAcceptors.map(function(acceptor) { + var p = i - acceptor.strOffset + 1 + , _info = path[p]; + + var info = {p: p, + mw: _info.mw + (acceptor.mw === undefined ? 0 : acceptor.mw), + w: acceptor.w + _info.w, + unk: (acceptor.unk ? acceptor.unk : 0) + _info.unk, + type: acceptor.type}; + + if (acceptor.type == "PART") { + for(var j = p + 1; j <= i; j++) { + path[j].merge = p; + } + info.merge = p; + } + + return info; + }); + return infos.filter(function(info) { return info; }); + }, + + fallback: function(path, leftBoundary, text, i) { + var _info = path[leftBoundary]; + if (text[i].match(/[\u0E48-\u0E4E]/)) { + if (leftBoundary != 0) + leftBoundary = path[leftBoundary].p; + return {p: leftBoundary, + mw: 0, + w: 1 + _info.w, + unk: 1 + _info.unk, + type: "UNK"}; +/* } else if(leftBoundary > 0 && path[leftBoundary].type !== "UNK") { + leftBoundary = path[leftBoundary].p; + return {p: leftBoundary, + w: 1 + _info.w, + unk: 1 + _info.unk, + type: "UNK"}; */ + } else { + return {p: leftBoundary, + mw: _info.mw, + w: 1 + _info.w, + unk: 1 + _info.unk, + type: "UNK"}; + } + }, + + build: function(path, finalAcceptors, i, leftBoundary, text) { + var basicPathInfos = this.buildByAcceptors(path, finalAcceptors, i); + if (basicPathInfos.length > 0) { + return basicPathInfos; + } else { + return [this.fallback(path, leftBoundary, text, i)]; + } + } +}; + +module.exports = function() { + return _.clone(PathInfoBuilder); +} + +},{"./wordcut_core":8,"underscore":25}],5:[function(require,module,exports){ +var _ = require("underscore"); + + +var PathSelector = { + selectPath: function(paths) { + var path = paths.reduce(function(selectedPath, path) { + if (selectedPath == null) { + return path; + } else { + if (path.unk < selectedPath.unk) + return path; + if (path.unk == selectedPath.unk) { + if (path.mw < selectedPath.mw) + return path + if (path.mw == selectedPath.mw) { + if (path.w < selectedPath.w) + return path; + } + } + return selectedPath; + } + }, null); + return path; + }, + + createPath: function() { + return [{p:null, w:0, unk:0, type: "INIT", mw:0}]; + } +}; + +module.exports = function() { + return _.clone(PathSelector); +}; + +},{"underscore":25}],6:[function(require,module,exports){ +function isMatch(pat, offset, ch) { + if (pat.length <= offset) + return false; + var _ch = pat[offset]; + return _ch == ch || + (_ch.match(/[กข]/) && ch.match(/[ก-ฮ]/)) || + (_ch.match(/[มบ]/) && ch.match(/[ก-ฮ]/)) || + (_ch.match(/\u0E49/) && ch.match(/[\u0E48-\u0E4B]/)); +} + +var Rule0 = { + pat: "เหก็ม", + createAcceptor: function(tag) { + return {strOffset: 0, + isFinal: false, + transit: function(ch) { + if (isMatch(Rule0.pat, this.strOffset,ch)) { + this.isFinal = 
(this.strOffset + 1 == Rule0.pat.length); + this.strOffset++; + } else { + this.isError = true; + } + return this; + }, + isError: false, + tag: "THAI_RULE", + type: "THAI_RULE", + w: 1}; + } +}; + +var PartRule = { + createAcceptor: function(tag) { + return {strOffset: 0, + patterns: [ + "แก", "เก", "ก้", "กก์", "กา", "กี", "กิ", "กืก" + ], + isFinal: false, + transit: function(ch) { + var offset = this.strOffset; + this.patterns = this.patterns.filter(function(pat) { + return isMatch(pat, offset, ch); + }); + + if (this.patterns.length > 0) { + var len = 1 + offset; + this.isFinal = this.patterns.some(function(pat) { + return pat.length == len; + }); + this.strOffset++; + } else { + this.isError = true; + } + return this; + }, + isError: false, + tag: "PART", + type: "PART", + unk: 1, + w: 1}; + } +}; + +var ThaiRules = [Rule0, PartRule]; + +module.exports = ThaiRules; + +},{}],7:[function(require,module,exports){ +var sys = require("sys") + , WordcutDict = require("./dict") + , WordcutCore = require("./wordcut_core") + , PathInfoBuilder = require("./path_info_builder") + , PathSelector = require("./path_selector") + , Acceptors = require("./acceptors") + , latinRules = require("./latin_rules") + , thaiRules = require("./thai_rules") + , _ = require("underscore"); + + +var Wordcut = Object.create(WordcutCore); +Wordcut.defaultPathInfoBuilder = PathInfoBuilder; +Wordcut.defaultPathSelector = PathSelector; +Wordcut.defaultAcceptors = Acceptors; +Wordcut.defaultLatinRules = latinRules; +Wordcut.defaultThaiRules = thaiRules; +Wordcut.defaultDict = WordcutDict; + + +Wordcut.initNoDict = function(dict_path) { + var self = this; + self.pathInfoBuilder = new self.defaultPathInfoBuilder; + self.pathSelector = new self.defaultPathSelector; + self.acceptors = new self.defaultAcceptors; + self.defaultLatinRules.forEach(function(rule) { + self.acceptors.creators.push(rule); + }); + self.defaultThaiRules.forEach(function(rule) { + self.acceptors.creators.push(rule); + }); +}; + +Wordcut.init = function(dict_path, withDefault, additionalWords) { + withDefault = withDefault || false; + this.initNoDict(); + var dict = _.clone(this.defaultDict); + dict.init(dict_path, withDefault, additionalWords); + this.acceptors.creators.push(dict); +}; + +module.exports = Wordcut; + +},{"./acceptors":1,"./dict":2,"./latin_rules":3,"./path_info_builder":4,"./path_selector":5,"./thai_rules":6,"./wordcut_core":8,"sys":28,"underscore":25}],8:[function(require,module,exports){ +var WordcutCore = { + + buildPath: function(text) { + var self = this + , path = self.pathSelector.createPath() + , leftBoundary = 0; + self.acceptors.reset(); + for (var i = 0; i < text.length; i++) { + var ch = text[i]; + self.acceptors.transit(ch); + + var possiblePathInfos = self + .pathInfoBuilder + .build(path, + self.acceptors.getFinalAcceptors(), + i, + leftBoundary, + text); + var selectedPath = self.pathSelector.selectPath(possiblePathInfos) + + path.push(selectedPath); + if (selectedPath.type !== "UNK") { + leftBoundary = i; + } + } + return path; + }, + + pathToRanges: function(path) { + var e = path.length - 1 + , ranges = []; + + while (e > 0) { + var info = path[e] + , s = info.p; + + if (info.merge !== undefined && ranges.length > 0) { + var r = ranges[ranges.length - 1]; + r.s = info.merge; + s = r.s; + } else { + ranges.push({s:s, e:e}); + } + e = s; + } + return ranges.reverse(); + }, + + rangesToText: function(text, ranges, delimiter) { + return ranges.map(function(r) { + return text.substring(r.s, r.e); + }).join(delimiter); + 
}, + + cut: function(text, delimiter) { + var path = this.buildPath(text) + , ranges = this.pathToRanges(path); + return this + .rangesToText(text, ranges, + (delimiter === undefined ? "|" : delimiter)); + }, + + cutIntoRanges: function(text, noText) { + var path = this.buildPath(text) + , ranges = this.pathToRanges(path); + + if (!noText) { + ranges.forEach(function(r) { + r.text = text.substring(r.s, r.e); + }); + } + return ranges; + }, + + cutIntoArray: function(text) { + var path = this.buildPath(text) + , ranges = this.pathToRanges(path); + + return ranges.map(function(r) { + return text.substring(r.s, r.e) + }); + } +}; + +module.exports = WordcutCore; + +},{}],9:[function(require,module,exports){ +// http://wiki.commonjs.org/wiki/Unit_Testing/1.0 +// +// THIS IS NOT TESTED NOR LIKELY TO WORK OUTSIDE V8! +// +// Originally from narwhal.js (http://narwhaljs.org) +// Copyright (c) 2009 Thomas Robinson <280north.com> +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the 'Software'), to +// deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +// sell copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// when used in node, this will actually load the util module we depend on +// versus loading the builtin util module as happens otherwise +// this is a bug in node module loading as far as I am concerned +var util = require('util/'); + +var pSlice = Array.prototype.slice; +var hasOwn = Object.prototype.hasOwnProperty; + +// 1. The assert module provides functions that throw +// AssertionError's when particular conditions are not met. The +// assert module must conform to the following interface. + +var assert = module.exports = ok; + +// 2. The AssertionError is defined in assert. 
+// new assert.AssertionError({ message: message, +// actual: actual, +// expected: expected }) + +assert.AssertionError = function AssertionError(options) { + this.name = 'AssertionError'; + this.actual = options.actual; + this.expected = options.expected; + this.operator = options.operator; + if (options.message) { + this.message = options.message; + this.generatedMessage = false; + } else { + this.message = getMessage(this); + this.generatedMessage = true; + } + var stackStartFunction = options.stackStartFunction || fail; + + if (Error.captureStackTrace) { + Error.captureStackTrace(this, stackStartFunction); + } + else { + // non v8 browsers so we can have a stacktrace + var err = new Error(); + if (err.stack) { + var out = err.stack; + + // try to strip useless frames + var fn_name = stackStartFunction.name; + var idx = out.indexOf('\n' + fn_name); + if (idx >= 0) { + // once we have located the function frame + // we need to strip out everything before it (and its line) + var next_line = out.indexOf('\n', idx + 1); + out = out.substring(next_line + 1); + } + + this.stack = out; + } + } +}; + +// assert.AssertionError instanceof Error +util.inherits(assert.AssertionError, Error); + +function replacer(key, value) { + if (util.isUndefined(value)) { + return '' + value; + } + if (util.isNumber(value) && !isFinite(value)) { + return value.toString(); + } + if (util.isFunction(value) || util.isRegExp(value)) { + return value.toString(); + } + return value; +} + +function truncate(s, n) { + if (util.isString(s)) { + return s.length < n ? s : s.slice(0, n); + } else { + return s; + } +} + +function getMessage(self) { + return truncate(JSON.stringify(self.actual, replacer), 128) + ' ' + + self.operator + ' ' + + truncate(JSON.stringify(self.expected, replacer), 128); +} + +// At present only the three keys mentioned above are used and +// understood by the spec. Implementations or sub modules can pass +// other keys to the AssertionError's constructor - they will be +// ignored. + +// 3. All of the following functions must throw an AssertionError +// when a corresponding condition is not met, with a message that +// may be undefined if not provided. All assertion methods provide +// both the actual and expected values to the assertion error for +// display purposes. + +function fail(actual, expected, message, operator, stackStartFunction) { + throw new assert.AssertionError({ + message: message, + actual: actual, + expected: expected, + operator: operator, + stackStartFunction: stackStartFunction + }); +} + +// EXTENSION! allows for well behaved errors defined elsewhere. +assert.fail = fail; + +// 4. Pure assertion tests whether a value is truthy, as determined +// by !!guard. +// assert.ok(guard, message_opt); +// This statement is equivalent to assert.equal(true, !!guard, +// message_opt);. To test strictly for the value true, use +// assert.strictEqual(true, guard, message_opt);. + +function ok(value, message) { + if (!value) fail(value, true, message, '==', assert.ok); +} +assert.ok = ok; + +// 5. The equality assertion tests shallow, coercive equality with +// ==. +// assert.equal(actual, expected, message_opt); + +assert.equal = function equal(actual, expected, message) { + if (actual != expected) fail(actual, expected, message, '==', assert.equal); +}; + +// 6. 
The non-equality assertion tests for whether two objects are not equal +// with != assert.notEqual(actual, expected, message_opt); + +assert.notEqual = function notEqual(actual, expected, message) { + if (actual == expected) { + fail(actual, expected, message, '!=', assert.notEqual); + } +}; + +// 7. The equivalence assertion tests a deep equality relation. +// assert.deepEqual(actual, expected, message_opt); + +assert.deepEqual = function deepEqual(actual, expected, message) { + if (!_deepEqual(actual, expected)) { + fail(actual, expected, message, 'deepEqual', assert.deepEqual); + } +}; + +function _deepEqual(actual, expected) { + // 7.1. All identical values are equivalent, as determined by ===. + if (actual === expected) { + return true; + + } else if (util.isBuffer(actual) && util.isBuffer(expected)) { + if (actual.length != expected.length) return false; + + for (var i = 0; i < actual.length; i++) { + if (actual[i] !== expected[i]) return false; + } + + return true; + + // 7.2. If the expected value is a Date object, the actual value is + // equivalent if it is also a Date object that refers to the same time. + } else if (util.isDate(actual) && util.isDate(expected)) { + return actual.getTime() === expected.getTime(); + + // 7.3 If the expected value is a RegExp object, the actual value is + // equivalent if it is also a RegExp object with the same source and + // properties (`global`, `multiline`, `lastIndex`, `ignoreCase`). + } else if (util.isRegExp(actual) && util.isRegExp(expected)) { + return actual.source === expected.source && + actual.global === expected.global && + actual.multiline === expected.multiline && + actual.lastIndex === expected.lastIndex && + actual.ignoreCase === expected.ignoreCase; + + // 7.4. Other pairs that do not both pass typeof value == 'object', + // equivalence is determined by ==. + } else if (!util.isObject(actual) && !util.isObject(expected)) { + return actual == expected; + + // 7.5 For all other Object pairs, including Array objects, equivalence is + // determined by having the same number of owned properties (as verified + // with Object.prototype.hasOwnProperty.call), the same set of keys + // (although not necessarily the same order), equivalent values for every + // corresponding key, and an identical 'prototype' property. Note: this + // accounts for both named and indexed properties on Arrays. + } else { + return objEquiv(actual, expected); + } +} + +function isArguments(object) { + return Object.prototype.toString.call(object) == '[object Arguments]'; +} + +function objEquiv(a, b) { + if (util.isNullOrUndefined(a) || util.isNullOrUndefined(b)) + return false; + // an identical 'prototype' property. 
+ if (a.prototype !== b.prototype) return false; + // if one is a primitive, the other must be same + if (util.isPrimitive(a) || util.isPrimitive(b)) { + return a === b; + } + var aIsArgs = isArguments(a), + bIsArgs = isArguments(b); + if ((aIsArgs && !bIsArgs) || (!aIsArgs && bIsArgs)) + return false; + if (aIsArgs) { + a = pSlice.call(a); + b = pSlice.call(b); + return _deepEqual(a, b); + } + var ka = objectKeys(a), + kb = objectKeys(b), + key, i; + // having the same number of owned properties (keys incorporates + // hasOwnProperty) + if (ka.length != kb.length) + return false; + //the same set of keys (although not necessarily the same order), + ka.sort(); + kb.sort(); + //~~~cheap key test + for (i = ka.length - 1; i >= 0; i--) { + if (ka[i] != kb[i]) + return false; + } + //equivalent values for every corresponding key, and + //~~~possibly expensive deep test + for (i = ka.length - 1; i >= 0; i--) { + key = ka[i]; + if (!_deepEqual(a[key], b[key])) return false; + } + return true; +} + +// 8. The non-equivalence assertion tests for any deep inequality. +// assert.notDeepEqual(actual, expected, message_opt); + +assert.notDeepEqual = function notDeepEqual(actual, expected, message) { + if (_deepEqual(actual, expected)) { + fail(actual, expected, message, 'notDeepEqual', assert.notDeepEqual); + } +}; + +// 9. The strict equality assertion tests strict equality, as determined by ===. +// assert.strictEqual(actual, expected, message_opt); + +assert.strictEqual = function strictEqual(actual, expected, message) { + if (actual !== expected) { + fail(actual, expected, message, '===', assert.strictEqual); + } +}; + +// 10. The strict non-equality assertion tests for strict inequality, as +// determined by !==. assert.notStrictEqual(actual, expected, message_opt); + +assert.notStrictEqual = function notStrictEqual(actual, expected, message) { + if (actual === expected) { + fail(actual, expected, message, '!==', assert.notStrictEqual); + } +}; + +function expectedException(actual, expected) { + if (!actual || !expected) { + return false; + } + + if (Object.prototype.toString.call(expected) == '[object RegExp]') { + return expected.test(actual); + } else if (actual instanceof expected) { + return true; + } else if (expected.call({}, actual) === true) { + return true; + } + + return false; +} + +function _throws(shouldThrow, block, expected, message) { + var actual; + + if (util.isString(expected)) { + message = expected; + expected = null; + } + + try { + block(); + } catch (e) { + actual = e; + } + + message = (expected && expected.name ? ' (' + expected.name + ').' : '.') + + (message ? ' ' + message : '.'); + + if (shouldThrow && !actual) { + fail(actual, expected, 'Missing expected exception' + message); + } + + if (!shouldThrow && expectedException(actual, expected)) { + fail(actual, expected, 'Got unwanted exception' + message); + } + + if ((shouldThrow && actual && expected && + !expectedException(actual, expected)) || (!shouldThrow && actual)) { + throw actual; + } +} + +// 11. Expected to throw an error: +// assert.throws(block, Error_opt, message_opt); + +assert.throws = function(block, /*optional*/error, /*optional*/message) { + _throws.apply(this, [true].concat(pSlice.call(arguments))); +}; + +// EXTENSION! This is annoying to write outside this module. 
+assert.doesNotThrow = function(block, /*optional*/message) { + _throws.apply(this, [false].concat(pSlice.call(arguments))); +}; + +assert.ifError = function(err) { if (err) {throw err;}}; + +var objectKeys = Object.keys || function (obj) { + var keys = []; + for (var key in obj) { + if (hasOwn.call(obj, key)) keys.push(key); + } + return keys; +}; + +},{"util/":28}],10:[function(require,module,exports){ +'use strict'; +module.exports = balanced; +function balanced(a, b, str) { + if (a instanceof RegExp) a = maybeMatch(a, str); + if (b instanceof RegExp) b = maybeMatch(b, str); + + var r = range(a, b, str); + + return r && { + start: r[0], + end: r[1], + pre: str.slice(0, r[0]), + body: str.slice(r[0] + a.length, r[1]), + post: str.slice(r[1] + b.length) + }; +} + +function maybeMatch(reg, str) { + var m = str.match(reg); + return m ? m[0] : null; +} + +balanced.range = range; +function range(a, b, str) { + var begs, beg, left, right, result; + var ai = str.indexOf(a); + var bi = str.indexOf(b, ai + 1); + var i = ai; + + if (ai >= 0 && bi > 0) { + begs = []; + left = str.length; + + while (i >= 0 && !result) { + if (i == ai) { + begs.push(i); + ai = str.indexOf(a, i + 1); + } else if (begs.length == 1) { + result = [ begs.pop(), bi ]; + } else { + beg = begs.pop(); + if (beg < left) { + left = beg; + right = bi; + } + + bi = str.indexOf(b, i + 1); + } + + i = ai < bi && ai >= 0 ? ai : bi; + } + + if (begs.length) { + result = [ left, right ]; + } + } + + return result; +} + +},{}],11:[function(require,module,exports){ +var concatMap = require('concat-map'); +var balanced = require('balanced-match'); + +module.exports = expandTop; + +var escSlash = '\0SLASH'+Math.random()+'\0'; +var escOpen = '\0OPEN'+Math.random()+'\0'; +var escClose = '\0CLOSE'+Math.random()+'\0'; +var escComma = '\0COMMA'+Math.random()+'\0'; +var escPeriod = '\0PERIOD'+Math.random()+'\0'; + +function numeric(str) { + return parseInt(str, 10) == str + ? parseInt(str, 10) + : str.charCodeAt(0); +} + +function escapeBraces(str) { + return str.split('\\\\').join(escSlash) + .split('\\{').join(escOpen) + .split('\\}').join(escClose) + .split('\\,').join(escComma) + .split('\\.').join(escPeriod); +} + +function unescapeBraces(str) { + return str.split(escSlash).join('\\') + .split(escOpen).join('{') + .split(escClose).join('}') + .split(escComma).join(',') + .split(escPeriod).join('.'); +} + + +// Basically just str.split(","), but handling cases +// where we have nested braced sections, which should be +// treated as individual members, like {a,{b,c},d} +function parseCommaParts(str) { + if (!str) + return ['']; + + var parts = []; + var m = balanced('{', '}', str); + + if (!m) + return str.split(','); + + var pre = m.pre; + var body = m.body; + var post = m.post; + var p = pre.split(','); + + p[p.length-1] += '{' + body + '}'; + var postParts = parseCommaParts(post); + if (post.length) { + p[p.length-1] += postParts.shift(); + p.push.apply(p, postParts); + } + + parts.push.apply(parts, p); + + return parts; +} + +function expandTop(str) { + if (!str) + return []; + + // I don't know why Bash 4.3 does this, but it does. + // Anything starting with {} will have the first two bytes preserved + // but *only* at the top level, so {},a}b will not expand to anything, + // but a{},b}c will be expanded to [a}c,abc]. 
+ // One could argue that this is a bug in Bash, but since the goal of + // this module is to match Bash's rules, we escape a leading {} + if (str.substr(0, 2) === '{}') { + str = '\\{\\}' + str.substr(2); + } + + return expand(escapeBraces(str), true).map(unescapeBraces); +} + +function identity(e) { + return e; +} + +function embrace(str) { + return '{' + str + '}'; +} +function isPadded(el) { + return /^-?0\d/.test(el); +} + +function lte(i, y) { + return i <= y; +} +function gte(i, y) { + return i >= y; +} + +function expand(str, isTop) { + var expansions = []; + + var m = balanced('{', '}', str); + if (!m || /\$$/.test(m.pre)) return [str]; + + var isNumericSequence = /^-?\d+\.\.-?\d+(?:\.\.-?\d+)?$/.test(m.body); + var isAlphaSequence = /^[a-zA-Z]\.\.[a-zA-Z](?:\.\.-?\d+)?$/.test(m.body); + var isSequence = isNumericSequence || isAlphaSequence; + var isOptions = m.body.indexOf(',') >= 0; + if (!isSequence && !isOptions) { + // {a},b} + if (m.post.match(/,.*\}/)) { + str = m.pre + '{' + m.body + escClose + m.post; + return expand(str); + } + return [str]; + } + + var n; + if (isSequence) { + n = m.body.split(/\.\./); + } else { + n = parseCommaParts(m.body); + if (n.length === 1) { + // x{{a,b}}y ==> x{a}y x{b}y + n = expand(n[0], false).map(embrace); + if (n.length === 1) { + var post = m.post.length + ? expand(m.post, false) + : ['']; + return post.map(function(p) { + return m.pre + n[0] + p; + }); + } + } + } + + // at this point, n is the parts, and we know it's not a comma set + // with a single entry. + + // no need to expand pre, since it is guaranteed to be free of brace-sets + var pre = m.pre; + var post = m.post.length + ? expand(m.post, false) + : ['']; + + var N; + + if (isSequence) { + var x = numeric(n[0]); + var y = numeric(n[1]); + var width = Math.max(n[0].length, n[1].length) + var incr = n.length == 3 + ? Math.abs(numeric(n[2])) + : 1; + var test = lte; + var reverse = y < x; + if (reverse) { + incr *= -1; + test = gte; + } + var pad = n.some(isPadded); + + N = []; + + for (var i = x; test(i, y); i += incr) { + var c; + if (isAlphaSequence) { + c = String.fromCharCode(i); + if (c === '\\') + c = ''; + } else { + c = String(i); + if (pad) { + var need = width - c.length; + if (need > 0) { + var z = new Array(need + 1).join('0'); + if (i < 0) + c = '-' + z + c.slice(1); + else + c = z + c; + } + } + } + N.push(c); + } + } else { + N = concatMap(n, function(el) { return expand(el, false) }); + } + + for (var j = 0; j < N.length; j++) { + for (var k = 0; k < post.length; k++) { + var expansion = pre + N[j] + post[k]; + if (!isTop || isSequence || expansion) + expansions.push(expansion); + } + } + + return expansions; +} + + +},{"balanced-match":10,"concat-map":13}],12:[function(require,module,exports){ + +},{}],13:[function(require,module,exports){ +module.exports = function (xs, fn) { + var res = []; + for (var i = 0; i < xs.length; i++) { + var x = fn(xs[i], i); + if (isArray(x)) res.push.apply(res, x); + else res.push(x); + } + return res; +}; + +var isArray = Array.isArray || function (xs) { + return Object.prototype.toString.call(xs) === '[object Array]'; +}; + +},{}],14:[function(require,module,exports){ +// Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +function EventEmitter() { + this._events = this._events || {}; + this._maxListeners = this._maxListeners || undefined; +} +module.exports = EventEmitter; + +// Backwards-compat with node 0.10.x +EventEmitter.EventEmitter = EventEmitter; + +EventEmitter.prototype._events = undefined; +EventEmitter.prototype._maxListeners = undefined; + +// By default EventEmitters will print a warning if more than 10 listeners are +// added to it. This is a useful default which helps finding memory leaks. +EventEmitter.defaultMaxListeners = 10; + +// Obviously not all Emitters should be limited to 10. This function allows +// that to be increased. Set to zero for unlimited. +EventEmitter.prototype.setMaxListeners = function(n) { + if (!isNumber(n) || n < 0 || isNaN(n)) + throw TypeError('n must be a positive number'); + this._maxListeners = n; + return this; +}; + +EventEmitter.prototype.emit = function(type) { + var er, handler, len, args, i, listeners; + + if (!this._events) + this._events = {}; + + // If there is no 'error' event listener then throw. + if (type === 'error') { + if (!this._events.error || + (isObject(this._events.error) && !this._events.error.length)) { + er = arguments[1]; + if (er instanceof Error) { + throw er; // Unhandled 'error' event + } + throw TypeError('Uncaught, unspecified "error" event.'); + } + } + + handler = this._events[type]; + + if (isUndefined(handler)) + return false; + + if (isFunction(handler)) { + switch (arguments.length) { + // fast cases + case 1: + handler.call(this); + break; + case 2: + handler.call(this, arguments[1]); + break; + case 3: + handler.call(this, arguments[1], arguments[2]); + break; + // slower + default: + len = arguments.length; + args = new Array(len - 1); + for (i = 1; i < len; i++) + args[i - 1] = arguments[i]; + handler.apply(this, args); + } + } else if (isObject(handler)) { + len = arguments.length; + args = new Array(len - 1); + for (i = 1; i < len; i++) + args[i - 1] = arguments[i]; + + listeners = handler.slice(); + len = listeners.length; + for (i = 0; i < len; i++) + listeners[i].apply(this, args); + } + + return true; +}; + +EventEmitter.prototype.addListener = function(type, listener) { + var m; + + if (!isFunction(listener)) + throw TypeError('listener must be a function'); + + if (!this._events) + this._events = {}; + + // To avoid recursion in the case that type === "newListener"! Before + // adding it to the listeners, first emit "newListener". 
+ if (this._events.newListener) + this.emit('newListener', type, + isFunction(listener.listener) ? + listener.listener : listener); + + if (!this._events[type]) + // Optimize the case of one listener. Don't need the extra array object. + this._events[type] = listener; + else if (isObject(this._events[type])) + // If we've already got an array, just append. + this._events[type].push(listener); + else + // Adding the second element, need to change to array. + this._events[type] = [this._events[type], listener]; + + // Check for listener leak + if (isObject(this._events[type]) && !this._events[type].warned) { + var m; + if (!isUndefined(this._maxListeners)) { + m = this._maxListeners; + } else { + m = EventEmitter.defaultMaxListeners; + } + + if (m && m > 0 && this._events[type].length > m) { + this._events[type].warned = true; + console.error('(node) warning: possible EventEmitter memory ' + + 'leak detected. %d listeners added. ' + + 'Use emitter.setMaxListeners() to increase limit.', + this._events[type].length); + if (typeof console.trace === 'function') { + // not supported in IE 10 + console.trace(); + } + } + } + + return this; +}; + +EventEmitter.prototype.on = EventEmitter.prototype.addListener; + +EventEmitter.prototype.once = function(type, listener) { + if (!isFunction(listener)) + throw TypeError('listener must be a function'); + + var fired = false; + + function g() { + this.removeListener(type, g); + + if (!fired) { + fired = true; + listener.apply(this, arguments); + } + } + + g.listener = listener; + this.on(type, g); + + return this; +}; + +// emits a 'removeListener' event iff the listener was removed +EventEmitter.prototype.removeListener = function(type, listener) { + var list, position, length, i; + + if (!isFunction(listener)) + throw TypeError('listener must be a function'); + + if (!this._events || !this._events[type]) + return this; + + list = this._events[type]; + length = list.length; + position = -1; + + if (list === listener || + (isFunction(list.listener) && list.listener === listener)) { + delete this._events[type]; + if (this._events.removeListener) + this.emit('removeListener', type, listener); + + } else if (isObject(list)) { + for (i = length; i-- > 0;) { + if (list[i] === listener || + (list[i].listener && list[i].listener === listener)) { + position = i; + break; + } + } + + if (position < 0) + return this; + + if (list.length === 1) { + list.length = 0; + delete this._events[type]; + } else { + list.splice(position, 1); + } + + if (this._events.removeListener) + this.emit('removeListener', type, listener); + } + + return this; +}; + +EventEmitter.prototype.removeAllListeners = function(type) { + var key, listeners; + + if (!this._events) + return this; + + // not listening for removeListener, no need to emit + if (!this._events.removeListener) { + if (arguments.length === 0) + this._events = {}; + else if (this._events[type]) + delete this._events[type]; + return this; + } + + // emit removeListener for all listeners on all events + if (arguments.length === 0) { + for (key in this._events) { + if (key === 'removeListener') continue; + this.removeAllListeners(key); + } + this.removeAllListeners('removeListener'); + this._events = {}; + return this; + } + + listeners = this._events[type]; + + if (isFunction(listeners)) { + this.removeListener(type, listeners); + } else { + // LIFO order + while (listeners.length) + this.removeListener(type, listeners[listeners.length - 1]); + } + delete this._events[type]; + + return this; +}; + 
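+// e.g. (illustrative) a once() wrapper keeps the original function reachable
+// via g.listener, which is why removeListener() above can match either the
+// wrapper or the original:
+//
+//   var ee = new EventEmitter()
+//   function onPing () {}
+//   ee.once('ping', onPing)
+//   ee.removeListener('ping', onPing) // also removes the wrapper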
+EventEmitter.prototype.listeners = function(type) { + var ret; + if (!this._events || !this._events[type]) + ret = []; + else if (isFunction(this._events[type])) + ret = [this._events[type]]; + else + ret = this._events[type].slice(); + return ret; +}; + +EventEmitter.listenerCount = function(emitter, type) { + var ret; + if (!emitter._events || !emitter._events[type]) + ret = 0; + else if (isFunction(emitter._events[type])) + ret = 1; + else + ret = emitter._events[type].length; + return ret; +}; + +function isFunction(arg) { + return typeof arg === 'function'; +} + +function isNumber(arg) { + return typeof arg === 'number'; +} + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} + +function isUndefined(arg) { + return arg === void 0; +} + +},{}],15:[function(require,module,exports){ +(function (process){ +exports.alphasort = alphasort +exports.alphasorti = alphasorti +exports.setopts = setopts +exports.ownProp = ownProp +exports.makeAbs = makeAbs +exports.finish = finish +exports.mark = mark +exports.isIgnored = isIgnored +exports.childrenIgnored = childrenIgnored + +function ownProp (obj, field) { + return Object.prototype.hasOwnProperty.call(obj, field) +} + +var path = require("path") +var minimatch = require("minimatch") +var isAbsolute = require("path-is-absolute") +var Minimatch = minimatch.Minimatch + +function alphasorti (a, b) { + return a.toLowerCase().localeCompare(b.toLowerCase()) +} + +function alphasort (a, b) { + return a.localeCompare(b) +} + +function setupIgnores (self, options) { + self.ignore = options.ignore || [] + + if (!Array.isArray(self.ignore)) + self.ignore = [self.ignore] + + if (self.ignore.length) { + self.ignore = self.ignore.map(ignoreMap) + } +} + +function ignoreMap (pattern) { + var gmatcher = null + if (pattern.slice(-3) === '/**') { + var gpattern = pattern.replace(/(\/\*\*)+$/, '') + gmatcher = new Minimatch(gpattern) + } + + return { + matcher: new Minimatch(pattern), + gmatcher: gmatcher + } +} + +function setopts (self, pattern, options) { + if (!options) + options = {} + + // base-matching: just use globstar for that. 
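+  // e.g. (illustrative) with matchBase, the pattern 'x.js' becomes
+  // '**/x.js' and can match at any depth.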
+ if (options.matchBase && -1 === pattern.indexOf("/")) { + if (options.noglobstar) { + throw new Error("base matching requires globstar") + } + pattern = "**/" + pattern + } + + self.silent = !!options.silent + self.pattern = pattern + self.strict = options.strict !== false + self.realpath = !!options.realpath + self.realpathCache = options.realpathCache || Object.create(null) + self.follow = !!options.follow + self.dot = !!options.dot + self.mark = !!options.mark + self.nodir = !!options.nodir + if (self.nodir) + self.mark = true + self.sync = !!options.sync + self.nounique = !!options.nounique + self.nonull = !!options.nonull + self.nosort = !!options.nosort + self.nocase = !!options.nocase + self.stat = !!options.stat + self.noprocess = !!options.noprocess + + self.maxLength = options.maxLength || Infinity + self.cache = options.cache || Object.create(null) + self.statCache = options.statCache || Object.create(null) + self.symlinks = options.symlinks || Object.create(null) + + setupIgnores(self, options) + + self.changedCwd = false + var cwd = process.cwd() + if (!ownProp(options, "cwd")) + self.cwd = cwd + else { + self.cwd = options.cwd + self.changedCwd = path.resolve(options.cwd) !== cwd + } + + self.root = options.root || path.resolve(self.cwd, "/") + self.root = path.resolve(self.root) + if (process.platform === "win32") + self.root = self.root.replace(/\\/g, "/") + + self.nomount = !!options.nomount + + // disable comments and negation unless the user explicitly + // passes in false as the option. + options.nonegate = options.nonegate === false ? false : true + options.nocomment = options.nocomment === false ? false : true + deprecationWarning(options) + + self.minimatch = new Minimatch(pattern, options) + self.options = self.minimatch.options +} + +// TODO(isaacs): remove entirely in v6 +// exported to reset in tests +exports.deprecationWarned +function deprecationWarning(options) { + if (!options.nonegate || !options.nocomment) { + if (process.noDeprecation !== true && !exports.deprecationWarned) { + var msg = 'glob WARNING: comments and negation will be disabled in v6' + if (process.throwDeprecation) + throw new Error(msg) + else if (process.traceDeprecation) + console.trace(msg) + else + console.error(msg) + + exports.deprecationWarned = true + } + } +} + +function finish (self) { + var nou = self.nounique + var all = nou ? [] : Object.create(null) + + for (var i = 0, l = self.matches.length; i < l; i ++) { + var matches = self.matches[i] + if (!matches || Object.keys(matches).length === 0) { + if (self.nonull) { + // do like the shell, and spit out the literal glob + var literal = self.minimatch.globSet[i] + if (nou) + all.push(literal) + else + all[literal] = true + } + } else { + // had matches + var m = Object.keys(matches) + if (nou) + all.push.apply(all, m) + else + m.forEach(function (m) { + all[m] = true + }) + } + } + + if (!nou) + all = Object.keys(all) + + if (!self.nosort) + all = all.sort(self.nocase ? 
alphasorti : alphasort) + + // at *some* point we statted all of these + if (self.mark) { + for (var i = 0; i < all.length; i++) { + all[i] = self._mark(all[i]) + } + if (self.nodir) { + all = all.filter(function (e) { + return !(/\/$/.test(e)) + }) + } + } + + if (self.ignore.length) + all = all.filter(function(m) { + return !isIgnored(self, m) + }) + + self.found = all +} + +function mark (self, p) { + var abs = makeAbs(self, p) + var c = self.cache[abs] + var m = p + if (c) { + var isDir = c === 'DIR' || Array.isArray(c) + var slash = p.slice(-1) === '/' + + if (isDir && !slash) + m += '/' + else if (!isDir && slash) + m = m.slice(0, -1) + + if (m !== p) { + var mabs = makeAbs(self, m) + self.statCache[mabs] = self.statCache[abs] + self.cache[mabs] = self.cache[abs] + } + } + + return m +} + +// lotta situps... +function makeAbs (self, f) { + var abs = f + if (f.charAt(0) === '/') { + abs = path.join(self.root, f) + } else if (isAbsolute(f) || f === '') { + abs = f + } else if (self.changedCwd) { + abs = path.resolve(self.cwd, f) + } else { + abs = path.resolve(f) + } + return abs +} + + +// Return true if the pattern ends with globstar '**', for the accompanying parent directory. +// Ex: if node_modules/** is the pattern, add 'node_modules' to the ignore list along with its contents +function isIgnored (self, path) { + if (!self.ignore.length) + return false + + return self.ignore.some(function(item) { + return item.matcher.match(path) || !!(item.gmatcher && item.gmatcher.match(path)) + }) +} + +function childrenIgnored (self, path) { + if (!self.ignore.length) + return false + + return self.ignore.some(function(item) { + return !!(item.gmatcher && item.gmatcher.match(path)) + }) +} + +}).call(this,require('_process')) +},{"_process":24,"minimatch":20,"path":22,"path-is-absolute":23}],16:[function(require,module,exports){ +(function (process){ +// Approach: +// +// 1. Get the minimatch set +// 2. For each pattern in the set, PROCESS(pattern, false) +// 3. Store matches per-set, then uniq them +// +// PROCESS(pattern, inGlobStar) +// Get the first [n] items from pattern that are all strings +// Join these together. This is PREFIX. +// If there is no more remaining, then stat(PREFIX) and +// add to matches if it succeeds. END. +// +// If inGlobStar and PREFIX is symlink and points to dir +// set ENTRIES = [] +// else readdir(PREFIX) as ENTRIES +// If fail, END +// +// with ENTRIES +// If pattern[n] is GLOBSTAR +// // handle the case where the globstar match is empty +// // by pruning it out, and testing the resulting pattern +// PROCESS(pattern[0..n] + pattern[n+1 .. $], false) +// // handle other cases. +// for ENTRY in ENTRIES (not dotfiles) +// // attach globstar + tail onto the entry +// // Mark that this entry is a globstar match +// PROCESS(pattern[0..n] + ENTRY + pattern[n .. $], true) +// +// else // not globstar +// for ENTRY in ENTRIES (not dotfiles, unless pattern[n] is dot) +// Test ENTRY against pattern[n] +// If fails, continue +// If passes, PROCESS(pattern[0..n] + item + pattern[n+1 .. $]) +// +// Caveat: +// Cache all stats and readdirs results to minimize syscalls. Since all +// we ever care about is existence and directory-ness, we can just keep +// `true` for files, and [children,...] for directories, or `false` for +// things that don't exist.
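+// A minimal usage sketch of the async API defined below (illustrative):
+//
+//   glob('**/*.js', { ignore: 'node_modules/**' }, function (er, files) {
+//     // `files` is the de-duplicated (and, unless nosort, sorted) match list
+//   })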
+ +module.exports = glob + +var fs = require('fs') +var minimatch = require('minimatch') +var Minimatch = minimatch.Minimatch +var inherits = require('inherits') +var EE = require('events').EventEmitter +var path = require('path') +var assert = require('assert') +var isAbsolute = require('path-is-absolute') +var globSync = require('./sync.js') +var common = require('./common.js') +var alphasort = common.alphasort +var alphasorti = common.alphasorti +var setopts = common.setopts +var ownProp = common.ownProp +var inflight = require('inflight') +var util = require('util') +var childrenIgnored = common.childrenIgnored +var isIgnored = common.isIgnored + +var once = require('once') + +function glob (pattern, options, cb) { + if (typeof options === 'function') cb = options, options = {} + if (!options) options = {} + + if (options.sync) { + if (cb) + throw new TypeError('callback provided to sync glob') + return globSync(pattern, options) + } + + return new Glob(pattern, options, cb) +} + +glob.sync = globSync +var GlobSync = glob.GlobSync = globSync.GlobSync + +// old api surface +glob.glob = glob + +glob.hasMagic = function (pattern, options_) { + var options = util._extend({}, options_) + options.noprocess = true + + var g = new Glob(pattern, options) + var set = g.minimatch.set + if (set.length > 1) + return true + + for (var j = 0; j < set[0].length; j++) { + if (typeof set[0][j] !== 'string') + return true + } + + return false +} + +glob.Glob = Glob +inherits(Glob, EE) +function Glob (pattern, options, cb) { + if (typeof options === 'function') { + cb = options + options = null + } + + if (options && options.sync) { + if (cb) + throw new TypeError('callback provided to sync glob') + return new GlobSync(pattern, options) + } + + if (!(this instanceof Glob)) + return new Glob(pattern, options, cb) + + setopts(this, pattern, options) + this._didRealPath = false + + // process each pattern in the minimatch set + var n = this.minimatch.set.length + + // The matches are stored as {<filename>: true,...} so that + // duplicates are automagically pruned. + // Later, we do an Object.keys() on these. + // Keep them as a list so we can fill in when nonull is set.
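+  // e.g. (illustrative) after a run, this.matches[0] might be
+  // { 'a.js': true, 'lib/b.js': true } for the first pattern in the set.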
+ this.matches = new Array(n) + + if (typeof cb === 'function') { + cb = once(cb) + this.on('error', cb) + this.on('end', function (matches) { + cb(null, matches) + }) + } + + var self = this + var n = this.minimatch.set.length + this._processing = 0 + this.matches = new Array(n) + + this._emitQueue = [] + this._processQueue = [] + this.paused = false + + if (this.noprocess) + return this + + if (n === 0) + return done() + + for (var i = 0; i < n; i ++) { + this._process(this.minimatch.set[i], i, false, done) + } + + function done () { + --self._processing + if (self._processing <= 0) + self._finish() + } +} + +Glob.prototype._finish = function () { + assert(this instanceof Glob) + if (this.aborted) + return + + if (this.realpath && !this._didRealpath) + return this._realpath() + + common.finish(this) + this.emit('end', this.found) +} + +Glob.prototype._realpath = function () { + if (this._didRealpath) + return + + this._didRealpath = true + + var n = this.matches.length + if (n === 0) + return this._finish() + + var self = this + for (var i = 0; i < this.matches.length; i++) + this._realpathSet(i, next) + + function next () { + if (--n === 0) + self._finish() + } +} + +Glob.prototype._realpathSet = function (index, cb) { + var matchset = this.matches[index] + if (!matchset) + return cb() + + var found = Object.keys(matchset) + var self = this + var n = found.length + + if (n === 0) + return cb() + + var set = this.matches[index] = Object.create(null) + found.forEach(function (p, i) { + // If there's a problem with the stat, then it means that + // one or more of the links in the realpath couldn't be + // resolved. just return the abs value in that case. + p = self._makeAbs(p) + fs.realpath(p, self.realpathCache, function (er, real) { + if (!er) + set[real] = true + else if (er.syscall === 'stat') + set[p] = true + else + self.emit('error', er) // srsly wtf right here + + if (--n === 0) { + self.matches[index] = set + cb() + } + }) + }) +} + +Glob.prototype._mark = function (p) { + return common.mark(this, p) +} + +Glob.prototype._makeAbs = function (f) { + return common.makeAbs(this, f) +} + +Glob.prototype.abort = function () { + this.aborted = true + this.emit('abort') +} + +Glob.prototype.pause = function () { + if (!this.paused) { + this.paused = true + this.emit('pause') + } +} + +Glob.prototype.resume = function () { + if (this.paused) { + this.emit('resume') + this.paused = false + if (this._emitQueue.length) { + var eq = this._emitQueue.slice(0) + this._emitQueue.length = 0 + for (var i = 0; i < eq.length; i ++) { + var e = eq[i] + this._emitMatch(e[0], e[1]) + } + } + if (this._processQueue.length) { + var pq = this._processQueue.slice(0) + this._processQueue.length = 0 + for (var i = 0; i < pq.length; i ++) { + var p = pq[i] + this._processing-- + this._process(p[0], p[1], p[2], p[3]) + } + } + } +} + +Glob.prototype._process = function (pattern, index, inGlobStar, cb) { + assert(this instanceof Glob) + assert(typeof cb === 'function') + + if (this.aborted) + return + + this._processing++ + if (this.paused) { + this._processQueue.push([pattern, index, inGlobStar, cb]) + return + } + + //console.error('PROCESS %d', this._processing, pattern) + + // Get the first [n] parts of pattern that are all strings. + var n = 0 + while (typeof pattern[n] === 'string') { + n ++ + } + // now n is the index of the first one that is *not* a string. 
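+  // e.g. (illustrative) for 'src/**/*.js' the minimatch set entry is
+  // ['src', GLOBSTAR, <RegExp for '*.js'>], so n === 1 here.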
+ + // see if there's anything else + var prefix + switch (n) { + // if not, then this is rather simple + case pattern.length: + this._processSimple(pattern.join('/'), index, cb) + return + + case 0: + // pattern *starts* with some non-trivial item. + // going to readdir(cwd), but not include the prefix in matches. + prefix = null + break + + default: + // pattern has some string bits in the front. + // whatever it starts with, whether that's 'absolute' like /foo/bar, + // or 'relative' like '../baz' + prefix = pattern.slice(0, n).join('/') + break + } + + var remain = pattern.slice(n) + + // get the list of entries. + var read + if (prefix === null) + read = '.' + else if (isAbsolute(prefix) || isAbsolute(pattern.join('/'))) { + if (!prefix || !isAbsolute(prefix)) + prefix = '/' + prefix + read = prefix + } else + read = prefix + + var abs = this._makeAbs(read) + + //if ignored, skip _processing + if (childrenIgnored(this, read)) + return cb() + + var isGlobStar = remain[0] === minimatch.GLOBSTAR + if (isGlobStar) + this._processGlobStar(prefix, read, abs, remain, index, inGlobStar, cb) + else + this._processReaddir(prefix, read, abs, remain, index, inGlobStar, cb) +} + +Glob.prototype._processReaddir = function (prefix, read, abs, remain, index, inGlobStar, cb) { + var self = this + this._readdir(abs, inGlobStar, function (er, entries) { + return self._processReaddir2(prefix, read, abs, remain, index, inGlobStar, entries, cb) + }) +} + +Glob.prototype._processReaddir2 = function (prefix, read, abs, remain, index, inGlobStar, entries, cb) { + + // if the abs isn't a dir, then nothing can match! + if (!entries) + return cb() + + // It will only match dot entries if it starts with a dot, or if + // dot is set. Stuff like @(.foo|.bar) isn't allowed. + var pn = remain[0] + var negate = !!this.minimatch.negate + var rawGlob = pn._glob + var dotOk = this.dot || rawGlob.charAt(0) === '.' + + var matchedEntries = [] + for (var i = 0; i < entries.length; i++) { + var e = entries[i] + if (e.charAt(0) !== '.' || dotOk) { + var m + if (negate && !prefix) { + m = !e.match(pn) + } else { + m = e.match(pn) + } + if (m) + matchedEntries.push(e) + } + } + + //console.error('prd2', prefix, entries, remain[0]._glob, matchedEntries) + + var len = matchedEntries.length + // If there are no matched entries, then nothing matches. + if (len === 0) + return cb() + + // if this is the last remaining pattern bit, then no need for + // an additional stat *unless* the user has specified mark or + // stat explicitly. We know they exist, since readdir returned + // them. + + if (remain.length === 1 && !this.mark && !this.stat) { + if (!this.matches[index]) + this.matches[index] = Object.create(null) + + for (var i = 0; i < len; i ++) { + var e = matchedEntries[i] + if (prefix) { + if (prefix !== '/') + e = prefix + '/' + e + else + e = prefix + e + } + + if (e.charAt(0) === '/' && !this.nomount) { + e = path.join(this.root, e) + } + this._emitMatch(index, e) + } + // This was the last one, and no stats were needed + return cb() + } + + // now test all matched entries as stand-ins for that part + // of the pattern. 
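+  // e.g. (illustrative) with remain = ['*', 'b.js'] and matchedEntries
+  // ['src', 'lib'], we recurse on ['src', 'b.js'] and ['lib', 'b.js'].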
+ remain.shift() + for (var i = 0; i < len; i ++) { + var e = matchedEntries[i] + var newPattern + if (prefix) { + if (prefix !== '/') + e = prefix + '/' + e + else + e = prefix + e + } + this._process([e].concat(remain), index, inGlobStar, cb) + } + cb() +} + +Glob.prototype._emitMatch = function (index, e) { + if (this.aborted) + return + + if (this.matches[index][e]) + return + + if (isIgnored(this, e)) + return + + if (this.paused) { + this._emitQueue.push([index, e]) + return + } + + var abs = this._makeAbs(e) + + if (this.nodir) { + var c = this.cache[abs] + if (c === 'DIR' || Array.isArray(c)) + return + } + + if (this.mark) + e = this._mark(e) + + this.matches[index][e] = true + + var st = this.statCache[abs] + if (st) + this.emit('stat', e, st) + + this.emit('match', e) +} + +Glob.prototype._readdirInGlobStar = function (abs, cb) { + if (this.aborted) + return + + // follow all symlinked directories forever + // just proceed as if this is a non-globstar situation + if (this.follow) + return this._readdir(abs, false, cb) + + var lstatkey = 'lstat\0' + abs + var self = this + var lstatcb = inflight(lstatkey, lstatcb_) + + if (lstatcb) + fs.lstat(abs, lstatcb) + + function lstatcb_ (er, lstat) { + if (er) + return cb() + + var isSym = lstat.isSymbolicLink() + self.symlinks[abs] = isSym + + // If it's not a symlink or a dir, then it's definitely a regular file. + // don't bother doing a readdir in that case. + if (!isSym && !lstat.isDirectory()) { + self.cache[abs] = 'FILE' + cb() + } else + self._readdir(abs, false, cb) + } +} + +Glob.prototype._readdir = function (abs, inGlobStar, cb) { + if (this.aborted) + return + + cb = inflight('readdir\0'+abs+'\0'+inGlobStar, cb) + if (!cb) + return + + //console.error('RD %j %j', +inGlobStar, abs) + if (inGlobStar && !ownProp(this.symlinks, abs)) + return this._readdirInGlobStar(abs, cb) + + if (ownProp(this.cache, abs)) { + var c = this.cache[abs] + if (!c || c === 'FILE') + return cb() + + if (Array.isArray(c)) + return cb(null, c) + } + + var self = this + fs.readdir(abs, readdirCb(this, abs, cb)) +} + +function readdirCb (self, abs, cb) { + return function (er, entries) { + if (er) + self._readdirError(abs, er, cb) + else + self._readdirEntries(abs, entries, cb) + } +} + +Glob.prototype._readdirEntries = function (abs, entries, cb) { + if (this.aborted) + return + + // if we haven't asked to stat everything, then just + // assume that everything in there exists, so we can avoid + // having to stat it a second time. + if (!this.mark && !this.stat) { + for (var i = 0; i < entries.length; i ++) { + var e = entries[i] + if (abs === '/') + e = abs + e + else + e = abs + '/' + e + this.cache[e] = true + } + } + + this.cache[abs] = entries + return cb(null, entries) +} + +Glob.prototype._readdirError = function (f, er, cb) { + if (this.aborted) + return + + // handle errors, and cache the information + switch (er.code) { + case 'ENOTSUP': // https://github.com/isaacs/node-glob/issues/205 + case 'ENOTDIR': // totally normal. means it *does* exist. + this.cache[this._makeAbs(f)] = 'FILE' + break + + case 'ENOENT': // not terribly unusual + case 'ELOOP': + case 'ENAMETOOLONG': + case 'UNKNOWN': + this.cache[this._makeAbs(f)] = false + break + + default: // some unusual error. Treat as failure. 
+ this.cache[this._makeAbs(f)] = false + if (this.strict) { + this.emit('error', er) + // If the error is handled, then we abort + // if not, we threw out of here + this.abort() + } + if (!this.silent) + console.error('glob error', er) + break + } + + return cb() +} + +Glob.prototype._processGlobStar = function (prefix, read, abs, remain, index, inGlobStar, cb) { + var self = this + this._readdir(abs, inGlobStar, function (er, entries) { + self._processGlobStar2(prefix, read, abs, remain, index, inGlobStar, entries, cb) + }) +} + + +Glob.prototype._processGlobStar2 = function (prefix, read, abs, remain, index, inGlobStar, entries, cb) { + //console.error('pgs2', prefix, remain[0], entries) + + // no entries means not a dir, so it can never have matches + // foo.txt/** doesn't match foo.txt + if (!entries) + return cb() + + // test without the globstar, and with every child both below + // and replacing the globstar. + var remainWithoutGlobStar = remain.slice(1) + var gspref = prefix ? [ prefix ] : [] + var noGlobStar = gspref.concat(remainWithoutGlobStar) + + // the noGlobStar pattern exits the inGlobStar state + this._process(noGlobStar, index, false, cb) + + var isSym = this.symlinks[abs] + var len = entries.length + + // If it's a symlink, and we're in a globstar, then stop + if (isSym && inGlobStar) + return cb() + + for (var i = 0; i < len; i++) { + var e = entries[i] + if (e.charAt(0) === '.' && !this.dot) + continue + + // these two cases enter the inGlobStar state + var instead = gspref.concat(entries[i], remainWithoutGlobStar) + this._process(instead, index, true, cb) + + var below = gspref.concat(entries[i], remain) + this._process(below, index, true, cb) + } + + cb() +} + +Glob.prototype._processSimple = function (prefix, index, cb) { + // XXX review this. Shouldn't it be doing the mounting etc + // before doing stat? kinda weird? + var self = this + this._stat(prefix, function (er, exists) { + self._processSimple2(prefix, index, er, exists, cb) + }) +} +Glob.prototype._processSimple2 = function (prefix, index, er, exists, cb) { + + //console.error('ps2', prefix, exists) + + if (!this.matches[index]) + this.matches[index] = Object.create(null) + + // If it doesn't exist, then just mark the lack of results + if (!exists) + return cb() + + if (prefix && isAbsolute(prefix) && !this.nomount) { + var trail = /[\/\\]$/.test(prefix) + if (prefix.charAt(0) === '/') { + prefix = path.join(this.root, prefix) + } else { + prefix = path.resolve(this.root, prefix) + if (trail) + prefix += '/' + } + } + + if (process.platform === 'win32') + prefix = prefix.replace(/\\/g, '/') + + // Mark this as a match + this._emitMatch(index, prefix) + cb() +} + +// Returns either 'DIR', 'FILE', or false +Glob.prototype._stat = function (f, cb) { + var abs = this._makeAbs(f) + var needDir = f.slice(-1) === '/' + + if (f.length > this.maxLength) + return cb() + + if (!this.stat && ownProp(this.cache, abs)) { + var c = this.cache[abs] + + if (Array.isArray(c)) + c = 'DIR' + + // It exists, but maybe not how we need it + if (!needDir || c === 'DIR') + return cb(null, c) + + if (needDir && c === 'FILE') + return cb() + + // otherwise we have to stat, because maybe c=true + // if we know it exists, but not what it is. + } + + var exists + var stat = this.statCache[abs] + if (stat !== undefined) { + if (stat === false) + return cb(null, stat) + else { + var type = stat.isDirectory() ? 
'DIR' : 'FILE' + if (needDir && type === 'FILE') + return cb() + else + return cb(null, type, stat) + } + } + + var self = this + var statcb = inflight('stat\0' + abs, lstatcb_) + if (statcb) + fs.lstat(abs, statcb) + + function lstatcb_ (er, lstat) { + if (lstat && lstat.isSymbolicLink()) { + // If it's a symlink, then treat it as the target, unless + // the target does not exist, then treat it as a file. + return fs.stat(abs, function (er, stat) { + if (er) + self._stat2(f, abs, null, lstat, cb) + else + self._stat2(f, abs, er, stat, cb) + }) + } else { + self._stat2(f, abs, er, lstat, cb) + } + } +} + +Glob.prototype._stat2 = function (f, abs, er, stat, cb) { + if (er) { + this.statCache[abs] = false + return cb() + } + + var needDir = f.slice(-1) === '/' + this.statCache[abs] = stat + + if (abs.slice(-1) === '/' && !stat.isDirectory()) + return cb(null, false, stat) + + var c = stat.isDirectory() ? 'DIR' : 'FILE' + this.cache[abs] = this.cache[abs] || c + + if (needDir && c !== 'DIR') + return cb() + + return cb(null, c, stat) +} + +}).call(this,require('_process')) +},{"./common.js":15,"./sync.js":17,"_process":24,"assert":9,"events":14,"fs":12,"inflight":18,"inherits":19,"minimatch":20,"once":21,"path":22,"path-is-absolute":23,"util":28}],17:[function(require,module,exports){ +(function (process){ +module.exports = globSync +globSync.GlobSync = GlobSync + +var fs = require('fs') +var minimatch = require('minimatch') +var Minimatch = minimatch.Minimatch +var Glob = require('./glob.js').Glob +var util = require('util') +var path = require('path') +var assert = require('assert') +var isAbsolute = require('path-is-absolute') +var common = require('./common.js') +var alphasort = common.alphasort +var alphasorti = common.alphasorti +var setopts = common.setopts +var ownProp = common.ownProp +var childrenIgnored = common.childrenIgnored + +function globSync (pattern, options) { + if (typeof options === 'function' || arguments.length === 3) + throw new TypeError('callback provided to sync glob\n'+ + 'See: https://github.com/isaacs/node-glob/issues/167') + + return new GlobSync(pattern, options).found +} + +function GlobSync (pattern, options) { + if (!pattern) + throw new Error('must provide pattern') + + if (typeof options === 'function' || arguments.length === 3) + throw new TypeError('callback provided to sync glob\n'+ + 'See: https://github.com/isaacs/node-glob/issues/167') + + if (!(this instanceof GlobSync)) + return new GlobSync(pattern, options) + + setopts(this, pattern, options) + + if (this.noprocess) + return this + + var n = this.minimatch.set.length + this.matches = new Array(n) + for (var i = 0; i < n; i ++) { + this._process(this.minimatch.set[i], i, false) + } + this._finish() +} + +GlobSync.prototype._finish = function () { + assert(this instanceof GlobSync) + if (this.realpath) { + var self = this + this.matches.forEach(function (matchset, index) { + var set = self.matches[index] = Object.create(null) + for (var p in matchset) { + try { + p = self._makeAbs(p) + var real = fs.realpathSync(p, self.realpathCache) + set[real] = true + } catch (er) { + if (er.syscall === 'stat') + set[self._makeAbs(p)] = true + else + throw er + } + } + }) + } + common.finish(this) +} + + +GlobSync.prototype._process = function (pattern, index, inGlobStar) { + assert(this instanceof GlobSync) + + // Get the first [n] parts of pattern that are all strings. + var n = 0 + while (typeof pattern[n] === 'string') { + n ++ + } + // now n is the index of the first one that is *not* a string. 
+ + // See if there's anything else + var prefix + switch (n) { + // if not, then this is rather simple + case pattern.length: + this._processSimple(pattern.join('/'), index) + return + + case 0: + // pattern *starts* with some non-trivial item. + // going to readdir(cwd), but not include the prefix in matches. + prefix = null + break + + default: + // pattern has some string bits in the front. + // whatever it starts with, whether that's 'absolute' like /foo/bar, + // or 'relative' like '../baz' + prefix = pattern.slice(0, n).join('/') + break + } + + var remain = pattern.slice(n) + + // get the list of entries. + var read + if (prefix === null) + read = '.' + else if (isAbsolute(prefix) || isAbsolute(pattern.join('/'))) { + if (!prefix || !isAbsolute(prefix)) + prefix = '/' + prefix + read = prefix + } else + read = prefix + + var abs = this._makeAbs(read) + + //if ignored, skip processing + if (childrenIgnored(this, read)) + return + + var isGlobStar = remain[0] === minimatch.GLOBSTAR + if (isGlobStar) + this._processGlobStar(prefix, read, abs, remain, index, inGlobStar) + else + this._processReaddir(prefix, read, abs, remain, index, inGlobStar) +} + + +GlobSync.prototype._processReaddir = function (prefix, read, abs, remain, index, inGlobStar) { + var entries = this._readdir(abs, inGlobStar) + + // if the abs isn't a dir, then nothing can match! + if (!entries) + return + + // It will only match dot entries if it starts with a dot, or if + // dot is set. Stuff like @(.foo|.bar) isn't allowed. + var pn = remain[0] + var negate = !!this.minimatch.negate + var rawGlob = pn._glob + var dotOk = this.dot || rawGlob.charAt(0) === '.' + + var matchedEntries = [] + for (var i = 0; i < entries.length; i++) { + var e = entries[i] + if (e.charAt(0) !== '.' || dotOk) { + var m + if (negate && !prefix) { + m = !e.match(pn) + } else { + m = e.match(pn) + } + if (m) + matchedEntries.push(e) + } + } + + var len = matchedEntries.length + // If there are no matched entries, then nothing matches. + if (len === 0) + return + + // if this is the last remaining pattern bit, then no need for + // an additional stat *unless* the user has specified mark or + // stat explicitly. We know they exist, since readdir returned + // them. + + if (remain.length === 1 && !this.mark && !this.stat) { + if (!this.matches[index]) + this.matches[index] = Object.create(null) + + for (var i = 0; i < len; i ++) { + var e = matchedEntries[i] + if (prefix) { + if (prefix.slice(-1) !== '/') + e = prefix + '/' + e + else + e = prefix + e + } + + if (e.charAt(0) === '/' && !this.nomount) { + e = path.join(this.root, e) + } + this.matches[index][e] = true + } + // This was the last one, and no stats were needed + return + } + + // now test all matched entries as stand-ins for that part + // of the pattern. 
+ remain.shift() + for (var i = 0; i < len; i ++) { + var e = matchedEntries[i] + var newPattern + if (prefix) + newPattern = [prefix, e] + else + newPattern = [e] + this._process(newPattern.concat(remain), index, inGlobStar) + } +} + + +GlobSync.prototype._emitMatch = function (index, e) { + var abs = this._makeAbs(e) + if (this.mark) + e = this._mark(e) + + if (this.matches[index][e]) + return + + if (this.nodir) { + var c = this.cache[this._makeAbs(e)] + if (c === 'DIR' || Array.isArray(c)) + return + } + + this.matches[index][e] = true + if (this.stat) + this._stat(e) +} + + +GlobSync.prototype._readdirInGlobStar = function (abs) { + // follow all symlinked directories forever + // just proceed as if this is a non-globstar situation + if (this.follow) + return this._readdir(abs, false) + + var entries + var lstat + var stat + try { + lstat = fs.lstatSync(abs) + } catch (er) { + // lstat failed, doesn't exist + return null + } + + var isSym = lstat.isSymbolicLink() + this.symlinks[abs] = isSym + + // If it's not a symlink or a dir, then it's definitely a regular file. + // don't bother doing a readdir in that case. + if (!isSym && !lstat.isDirectory()) + this.cache[abs] = 'FILE' + else + entries = this._readdir(abs, false) + + return entries +} + +GlobSync.prototype._readdir = function (abs, inGlobStar) { + var entries + + if (inGlobStar && !ownProp(this.symlinks, abs)) + return this._readdirInGlobStar(abs) + + if (ownProp(this.cache, abs)) { + var c = this.cache[abs] + if (!c || c === 'FILE') + return null + + if (Array.isArray(c)) + return c + } + + try { + return this._readdirEntries(abs, fs.readdirSync(abs)) + } catch (er) { + this._readdirError(abs, er) + return null + } +} + +GlobSync.prototype._readdirEntries = function (abs, entries) { + // if we haven't asked to stat everything, then just + // assume that everything in there exists, so we can avoid + // having to stat it a second time. + if (!this.mark && !this.stat) { + for (var i = 0; i < entries.length; i ++) { + var e = entries[i] + if (abs === '/') + e = abs + e + else + e = abs + '/' + e + this.cache[e] = true + } + } + + this.cache[abs] = entries + + // mark and cache dir-ness + return entries +} + +GlobSync.prototype._readdirError = function (f, er) { + // handle errors, and cache the information + switch (er.code) { + case 'ENOTSUP': // https://github.com/isaacs/node-glob/issues/205 + case 'ENOTDIR': // totally normal. means it *does* exist. + this.cache[this._makeAbs(f)] = 'FILE' + break + + case 'ENOENT': // not terribly unusual + case 'ELOOP': + case 'ENAMETOOLONG': + case 'UNKNOWN': + this.cache[this._makeAbs(f)] = false + break + + default: // some unusual error. Treat as failure. + this.cache[this._makeAbs(f)] = false + if (this.strict) + throw er + if (!this.silent) + console.error('glob error', er) + break + } +} + +GlobSync.prototype._processGlobStar = function (prefix, read, abs, remain, index, inGlobStar) { + + var entries = this._readdir(abs, inGlobStar) + + // no entries means not a dir, so it can never have matches + // foo.txt/** doesn't match foo.txt + if (!entries) + return + + // test without the globstar, and with every child both below + // and replacing the globstar. + var remainWithoutGlobStar = remain.slice(1) + var gspref = prefix ? 
[ prefix ] : [] + var noGlobStar = gspref.concat(remainWithoutGlobStar) + + // the noGlobStar pattern exits the inGlobStar state + this._process(noGlobStar, index, false) + + var len = entries.length + var isSym = this.symlinks[abs] + + // If it's a symlink, and we're in a globstar, then stop + if (isSym && inGlobStar) + return + + for (var i = 0; i < len; i++) { + var e = entries[i] + if (e.charAt(0) === '.' && !this.dot) + continue + + // these two cases enter the inGlobStar state + var instead = gspref.concat(entries[i], remainWithoutGlobStar) + this._process(instead, index, true) + + var below = gspref.concat(entries[i], remain) + this._process(below, index, true) + } +} + +GlobSync.prototype._processSimple = function (prefix, index) { + // XXX review this. Shouldn't it be doing the mounting etc + // before doing stat? kinda weird? + var exists = this._stat(prefix) + + if (!this.matches[index]) + this.matches[index] = Object.create(null) + + // If it doesn't exist, then just mark the lack of results + if (!exists) + return + + if (prefix && isAbsolute(prefix) && !this.nomount) { + var trail = /[\/\\]$/.test(prefix) + if (prefix.charAt(0) === '/') { + prefix = path.join(this.root, prefix) + } else { + prefix = path.resolve(this.root, prefix) + if (trail) + prefix += '/' + } + } + + if (process.platform === 'win32') + prefix = prefix.replace(/\\/g, '/') + + // Mark this as a match + this.matches[index][prefix] = true +} + +// Returns either 'DIR', 'FILE', or false +GlobSync.prototype._stat = function (f) { + var abs = this._makeAbs(f) + var needDir = f.slice(-1) === '/' + + if (f.length > this.maxLength) + return false + + if (!this.stat && ownProp(this.cache, abs)) { + var c = this.cache[abs] + + if (Array.isArray(c)) + c = 'DIR' + + // It exists, but maybe not how we need it + if (!needDir || c === 'DIR') + return c + + if (needDir && c === 'FILE') + return false + + // otherwise we have to stat, because maybe c=true + // if we know it exists, but not what it is. + } + + var exists + var stat = this.statCache[abs] + if (!stat) { + var lstat + try { + lstat = fs.lstatSync(abs) + } catch (er) { + return false + } + + if (lstat.isSymbolicLink()) { + try { + stat = fs.statSync(abs) + } catch (er) { + stat = lstat + } + } else { + stat = lstat + } + } + + this.statCache[abs] = stat + + var c = stat.isDirectory() ? 'DIR' : 'FILE' + this.cache[abs] = this.cache[abs] || c + + if (needDir && c !== 'DIR') + return false + + return c +} + +GlobSync.prototype._mark = function (p) { + return common.mark(this, p) +} + +GlobSync.prototype._makeAbs = function (f) { + return common.makeAbs(this, f) +} + +}).call(this,require('_process')) +},{"./common.js":15,"./glob.js":16,"_process":24,"assert":9,"fs":12,"minimatch":20,"path":22,"path-is-absolute":23,"util":28}],18:[function(require,module,exports){ +(function (process){ +var wrappy = require('wrappy') +var reqs = Object.create(null) +var once = require('once') + +module.exports = wrappy(inflight) + +function inflight (key, cb) { + if (reqs[key]) { + reqs[key].push(cb) + return null + } else { + reqs[key] = [cb] + return makeres(key) + } +} + +function makeres (key) { + return once(function RES () { + var cbs = reqs[key] + var len = cbs.length + var args = slice(arguments) + + // XXX It's somewhat ambiguous whether a new callback added in this + // pass should be queued for later execution if something in the + // list of callbacks throws, or if it should just be discarded. 
+ // However, it's such an edge case that it hardly matters, and either + // choice is likely as surprising as the other. + // As it happens, we do go ahead and schedule it for later execution. + try { + for (var i = 0; i < len; i++) { + cbs[i].apply(null, args) + } + } finally { + if (cbs.length > len) { + // added more in the interim. + // de-zalgo, just in case, but don't call again. + cbs.splice(0, len) + process.nextTick(function () { + RES.apply(null, args) + }) + } else { + delete reqs[key] + } + } + }) +} + +function slice (args) { + var length = args.length + var array = [] + + for (var i = 0; i < length; i++) array[i] = args[i] + return array +} + +}).call(this,require('_process')) +},{"_process":24,"once":21,"wrappy":29}],19:[function(require,module,exports){ +if (typeof Object.create === 'function') { + // implementation from standard node.js 'util' module + module.exports = function inherits(ctor, superCtor) { + ctor.super_ = superCtor + ctor.prototype = Object.create(superCtor.prototype, { + constructor: { + value: ctor, + enumerable: false, + writable: true, + configurable: true + } + }); + }; +} else { + // old school shim for old browsers + module.exports = function inherits(ctor, superCtor) { + ctor.super_ = superCtor + var TempCtor = function () {} + TempCtor.prototype = superCtor.prototype + ctor.prototype = new TempCtor() + ctor.prototype.constructor = ctor + } +} + +},{}],20:[function(require,module,exports){ +module.exports = minimatch +minimatch.Minimatch = Minimatch + +var path = { sep: '/' } +try { + path = require('path') +} catch (er) {} + +var GLOBSTAR = minimatch.GLOBSTAR = Minimatch.GLOBSTAR = {} +var expand = require('brace-expansion') + +var plTypes = { + '!': { open: '(?:(?!(?:', close: '))[^/]*?)'}, + '?': { open: '(?:', close: ')?' }, + '+': { open: '(?:', close: ')+' }, + '*': { open: '(?:', close: ')*' }, + '@': { open: '(?:', close: ')' } +} + +// any single thing other than / +// don't need to escape / when using new RegExp() +var qmark = '[^/]' + +// * => any number of characters +var star = qmark + '*?' + +// ** when dots are allowed. Anything goes, except .. and . +// not (^ or / followed by one or two dots followed by $ or /), +// followed by anything, any number of times. +var twoStarDot = '(?:(?!(?:\\\/|^)(?:\\.{1,2})($|\\\/)).)*?' + +// not a ^ or / followed by a dot, +// followed by anything, any number of times. +var twoStarNoDot = '(?:(?!(?:\\\/|^)\\.).)*?' + +// characters that need to be escaped in RegExp. +var reSpecials = charSet('().*{}+?[]^$\\!') + +// "abc" -> { a:true, b:true, c:true } +function charSet (s) { + return s.split('').reduce(function (set, c) { + set[c] = true + return set + }, {}) +} + +// normalizes slashes. 
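+// e.g. 'a//b///c'.split(slashSplit) => ['a', 'b', 'c']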
+var slashSplit = /\/+/ + +minimatch.filter = filter +function filter (pattern, options) { + options = options || {} + return function (p, i, list) { + return minimatch(p, pattern, options) + } +} + +function ext (a, b) { + a = a || {} + b = b || {} + var t = {} + Object.keys(b).forEach(function (k) { + t[k] = b[k] + }) + Object.keys(a).forEach(function (k) { + t[k] = a[k] + }) + return t +} + +minimatch.defaults = function (def) { + if (!def || !Object.keys(def).length) return minimatch + + var orig = minimatch + + var m = function minimatch (p, pattern, options) { + return orig.minimatch(p, pattern, ext(def, options)) + } + + m.Minimatch = function Minimatch (pattern, options) { + return new orig.Minimatch(pattern, ext(def, options)) + } + + return m +} + +Minimatch.defaults = function (def) { + if (!def || !Object.keys(def).length) return Minimatch + return minimatch.defaults(def).Minimatch +} + +function minimatch (p, pattern, options) { + if (typeof pattern !== 'string') { + throw new TypeError('glob pattern string required') + } + + if (!options) options = {} + + // shortcut: comments match nothing. + if (!options.nocomment && pattern.charAt(0) === '#') { + return false + } + + // "" only matches "" + if (pattern.trim() === '') return p === '' + + return new Minimatch(pattern, options).match(p) +} + +function Minimatch (pattern, options) { + if (!(this instanceof Minimatch)) { + return new Minimatch(pattern, options) + } + + if (typeof pattern !== 'string') { + throw new TypeError('glob pattern string required') + } + + if (!options) options = {} + pattern = pattern.trim() + + // windows support: need to use /, not \ + if (path.sep !== '/') { + pattern = pattern.split(path.sep).join('/') + } + + this.options = options + this.set = [] + this.pattern = pattern + this.regexp = null + this.negate = false + this.comment = false + this.empty = false + + // make the set of regexps etc. + this.make() +} + +Minimatch.prototype.debug = function () {} + +Minimatch.prototype.make = make +function make () { + // don't do it more than once. + if (this._made) return + + var pattern = this.pattern + var options = this.options + + // empty patterns and comments match nothing. + if (!options.nocomment && pattern.charAt(0) === '#') { + this.comment = true + return + } + if (!pattern) { + this.empty = true + return + } + + // step 1: figure out negation, etc. + this.parseNegate() + + // step 2: expand braces + var set = this.globSet = this.braceExpand() + + if (options.debug) this.debug = console.error + + this.debug(this.pattern, set) + + // step 3: now we have a set, so turn each one into a series of path-portion + // matching patterns. + // These will be regexps, except in the case of "**", which is + // set to the GLOBSTAR object for globstar behavior, + // and will not contain any / characters + set = this.globParts = set.map(function (s) { + return s.split(slashSplit) + }) + + this.debug(this.pattern, set) + + // glob --> regexps + set = set.map(function (s, si, set) { + return s.map(this.parse, this) + }, this) + + this.debug(this.pattern, set) + + // filter out everything that didn't compile properly. 
+ set = set.filter(function (s) { + return s.indexOf(false) === -1 + }) + + this.debug(this.pattern, set) + + this.set = set +} + +Minimatch.prototype.parseNegate = parseNegate +function parseNegate () { + var pattern = this.pattern + var negate = false + var options = this.options + var negateOffset = 0 + + if (options.nonegate) return + + for (var i = 0, l = pattern.length + ; i < l && pattern.charAt(i) === '!' + ; i++) { + negate = !negate + negateOffset++ + } + + if (negateOffset) this.pattern = pattern.substr(negateOffset) + this.negate = negate +} + +// Brace expansion: +// a{b,c}d -> abd acd +// a{b,}c -> abc ac +// a{0..3}d -> a0d a1d a2d a3d +// a{b,c{d,e}f}g -> abg acdfg acefg +// a{b,c}d{e,f}g -> abdeg acdeg abdeg abdfg +// +// Invalid sets are not expanded. +// a{2..}b -> a{2..}b +// a{b}c -> a{b}c +minimatch.braceExpand = function (pattern, options) { + return braceExpand(pattern, options) +} + +Minimatch.prototype.braceExpand = braceExpand + +function braceExpand (pattern, options) { + if (!options) { + if (this instanceof Minimatch) { + options = this.options + } else { + options = {} + } + } + + pattern = typeof pattern === 'undefined' + ? this.pattern : pattern + + if (typeof pattern === 'undefined') { + throw new TypeError('undefined pattern') + } + + if (options.nobrace || + !pattern.match(/\{.*\}/)) { + // shortcut. no need to expand. + return [pattern] + } + + return expand(pattern) +} + +// parse a component of the expanded set. +// At this point, no pattern may contain "/" in it +// so we're going to return a 2d array, where each entry is the full +// pattern, split on '/', and then turned into a regular expression. +// A regexp is made at the end which joins each array with an +// escaped /, and another full one which joins each regexp with |. +// +// Following the lead of Bash 4.1, note that "**" only has special meaning +// when it is the *only* thing in a path portion. Otherwise, any series +// of * is equivalent to a single *. Globstar behavior is enabled by +// default, and can be disabled by setting options.noglobstar. +Minimatch.prototype.parse = parse +var SUBPARSE = {} +function parse (pattern, isSub) { + if (pattern.length > 1024 * 64) { + throw new TypeError('pattern is too long') + } + + var options = this.options + + // shortcuts + if (!options.noglobstar && pattern === '**') return GLOBSTAR + if (pattern === '') return '' + + var re = '' + var hasMagic = !!options.nocase + var escaping = false + // ? => one single character + var patternListStack = [] + var negativeLists = [] + var stateChar + var inClass = false + var reClassStart = -1 + var classStart = -1 + // . and .. never match anything that doesn't start with ., + // even when options.dot is set. + var patternStart = pattern.charAt(0) === '.' ? '' // anything + // not (start or / followed by . or .. followed by / or end) + : options.dot ? '(?!(?:^|\\\/)\\.{1,2}(?:$|\\\/))' + : '(?!\\.)' + var self = this + + function clearStateChar () { + if (stateChar) { + // we had some state-tracking character + // that wasn't consumed by this pass. + switch (stateChar) { + case '*': + re += star + hasMagic = true + break + case '?': + re += qmark + hasMagic = true + break + default: + re += '\\' + stateChar + break + } + self.debug('clearStateChar %j %j', stateChar, re) + stateChar = false + } + } + + for (var i = 0, len = pattern.length, c + ; (i < len) && (c = pattern.charAt(i)) + ; i++) { + this.debug('%s\t%s %s %j', pattern, i, re, c) + + // skip over any that are escaped. 
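+    // e.g. (illustrative) in the pattern '\*' the escaped '*' is emitted as
+    // a literal '\*' instead of expanding to the `star` regexp.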
+ if (escaping && reSpecials[c]) { + re += '\\' + c + escaping = false + continue + } + + switch (c) { + case '/': + // completely not allowed, even escaped. + // Should already be path-split by now. + return false + + case '\\': + clearStateChar() + escaping = true + continue + + // the various stateChar values + // for the "extglob" stuff. + case '?': + case '*': + case '+': + case '@': + case '!': + this.debug('%s\t%s %s %j <-- stateChar', pattern, i, re, c) + + // all of those are literals inside a class, except that + // the glob [!a] means [^a] in regexp + if (inClass) { + this.debug(' in class') + if (c === '!' && i === classStart + 1) c = '^' + re += c + continue + } + + // if we already have a stateChar, then it means + // that there was something like ** or +? in there. + // Handle the stateChar, then proceed with this one. + self.debug('call clearStateChar %j', stateChar) + clearStateChar() + stateChar = c + // if extglob is disabled, then +(asdf|foo) isn't a thing. + // just clear the statechar *now*, rather than even diving into + // the patternList stuff. + if (options.noext) clearStateChar() + continue + + case '(': + if (inClass) { + re += '(' + continue + } + + if (!stateChar) { + re += '\\(' + continue + } + + patternListStack.push({ + type: stateChar, + start: i - 1, + reStart: re.length, + open: plTypes[stateChar].open, + close: plTypes[stateChar].close + }) + // negation is (?:(?!js)[^/]*) + re += stateChar === '!' ? '(?:(?!(?:' : '(?:' + this.debug('plType %j %j', stateChar, re) + stateChar = false + continue + + case ')': + if (inClass || !patternListStack.length) { + re += '\\)' + continue + } + + clearStateChar() + hasMagic = true + var pl = patternListStack.pop() + // negation is (?:(?!js)[^/]*) + // The others are (?:) + re += pl.close + if (pl.type === '!') { + negativeLists.push(pl) + } + pl.reEnd = re.length + continue + + case '|': + if (inClass || !patternListStack.length || escaping) { + re += '\\|' + escaping = false + continue + } + + clearStateChar() + re += '|' + continue + + // these are mostly the same in regexp and glob + case '[': + // swallow any state-tracking char before the [ + clearStateChar() + + if (inClass) { + re += '\\' + c + continue + } + + inClass = true + classStart = i + reClassStart = re.length + re += c + continue + + case ']': + // a right bracket shall lose its special + // meaning and represent itself in + // a bracket expression if it occurs + // first in the list. -- POSIX.2 2.8.3.2 + if (i === classStart + 1 || !inClass) { + re += '\\' + c + escaping = false + continue + } + + // handle the case where we left a class open. + // "[z-a]" is valid, equivalent to "\[z-a\]" + if (inClass) { + // split where the last [ was, make sure we don't have + // an invalid re. if so, re-walk the contents of the + // would-be class to re-translate any characters that + // were passed through as-is + // TODO: It would probably be faster to determine this + // without a try/catch and a new RegExp, but it's tricky + // to do safely. For now, this is safe and works. + var cs = pattern.substring(classStart + 1, i) + try { + RegExp('[' + cs + ']') + } catch (er) { + // not a valid class! + var sp = this.parse(cs, SUBPARSE) + re = re.substr(0, reClassStart) + '\\[' + sp[0] + '\\]' + hasMagic = hasMagic || sp[1] + inClass = false + continue + } + } + + // finish up the class. 
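+        // [Editor's note, illustrative and not part of upstream minimatch]
+        // e.g. the glob class '[a-c]' reaches this point and is emitted
+        // unchanged as the regexp source '[a-c]', while an invalid class
+        // like '[z-a]' is caught by the RegExp probe above and re-emitted
+        // as the literal characters '\[z-a\]'.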
+ hasMagic = true + inClass = false + re += c + continue + + default: + // swallow any state char that wasn't consumed + clearStateChar() + + if (escaping) { + // no need + escaping = false + } else if (reSpecials[c] + && !(c === '^' && inClass)) { + re += '\\' + } + + re += c + + } // switch + } // for + + // handle the case where we left a class open. + // "[abc" is valid, equivalent to "\[abc" + if (inClass) { + // split where the last [ was, and escape it + // this is a huge pita. We now have to re-walk + // the contents of the would-be class to re-translate + // any characters that were passed through as-is + cs = pattern.substr(classStart + 1) + sp = this.parse(cs, SUBPARSE) + re = re.substr(0, reClassStart) + '\\[' + sp[0] + hasMagic = hasMagic || sp[1] + } + + // handle the case where we had a +( thing at the *end* + // of the pattern. + // each pattern list stack adds 3 chars, and we need to go through + // and escape any | chars that were passed through as-is for the regexp. + // Go through and escape them, taking care not to double-escape any + // | chars that were already escaped. + for (pl = patternListStack.pop(); pl; pl = patternListStack.pop()) { + var tail = re.slice(pl.reStart + pl.open.length) + this.debug('setting tail', re, pl) + // maybe some even number of \, then maybe 1 \, followed by a | + tail = tail.replace(/((?:\\{2}){0,64})(\\?)\|/g, function (_, $1, $2) { + if (!$2) { + // the | isn't already escaped, so escape it. + $2 = '\\' + } + + // need to escape all those slashes *again*, without escaping the + // one that we need for escaping the | character. As it works out, + // escaping an even number of slashes can be done by simply repeating + // it exactly after itself. That's why this trick works. + // + // I am sorry that you have to see this. + return $1 + $1 + $2 + '|' + }) + + this.debug('tail=%j\n %s', tail, tail, pl, re) + var t = pl.type === '*' ? star + : pl.type === '?' ? qmark + : '\\' + pl.type + + hasMagic = true + re = re.slice(0, pl.reStart) + t + '\\(' + tail + } + + // handle trailing things that only matter at the very end. + clearStateChar() + if (escaping) { + // trailing \\ + re += '\\\\' + } + + // only need to apply the nodot start if the re starts with + // something that could conceivably capture a dot + var addPatternStart = false + switch (re.charAt(0)) { + case '.': + case '[': + case '(': addPatternStart = true + } + + // Hack to work around lack of negative lookbehind in JS + // A pattern like: *.!(x).!(y|z) needs to ensure that a name + // like 'a.xyz.yz' doesn't match. So, the first negative + // lookahead, has to look ALL the way ahead, to the end of + // the pattern. + for (var n = negativeLists.length - 1; n > -1; n--) { + var nl = negativeLists[n] + + var nlBefore = re.slice(0, nl.reStart) + var nlFirst = re.slice(nl.reStart, nl.reEnd - 8) + var nlLast = re.slice(nl.reEnd - 8, nl.reEnd) + var nlAfter = re.slice(nl.reEnd) + + nlLast += nlAfter + + // Handle nested stuff like *(*.js|!(*.json)), where open parens + // mean that we should *not* include the ) in the bit that is considered + // "after" the negated section. 
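+    // [Editor's note, illustrative and not part of upstream minimatch]
+    // This fix-up loop is what makes a pattern like '*.!(x)' reject 'a.x':
+    // the '!(...)' group was opened above as '(?:(?!(?:', and the slicing
+    // here stretches that negative lookahead so it applies all the way to
+    // the end of the compiled expression rather than just to its own group.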
+ var openParensBefore = nlBefore.split('(').length - 1 + var cleanAfter = nlAfter + for (i = 0; i < openParensBefore; i++) { + cleanAfter = cleanAfter.replace(/\)[+*?]?/, '') + } + nlAfter = cleanAfter + + var dollar = '' + if (nlAfter === '' && isSub !== SUBPARSE) { + dollar = '$' + } + var newRe = nlBefore + nlFirst + nlAfter + dollar + nlLast + re = newRe + } + + // if the re is not "" at this point, then we need to make sure + // it doesn't match against an empty path part. + // Otherwise a/* will match a/, which it should not. + if (re !== '' && hasMagic) { + re = '(?=.)' + re + } + + if (addPatternStart) { + re = patternStart + re + } + + // parsing just a piece of a larger pattern. + if (isSub === SUBPARSE) { + return [re, hasMagic] + } + + // skip the regexp for non-magical patterns + // unescape anything in it, though, so that it'll be + // an exact match against a file etc. + if (!hasMagic) { + return globUnescape(pattern) + } + + var flags = options.nocase ? 'i' : '' + try { + var regExp = new RegExp('^' + re + '$', flags) + } catch (er) { + // If it was an invalid regular expression, then it can't match + // anything. This trick looks for a character after the end of + // the string, which is of course impossible, except in multi-line + // mode, but it's not a /m regex. + return new RegExp('$.') + } + + regExp._glob = pattern + regExp._src = re + + return regExp +} + +minimatch.makeRe = function (pattern, options) { + return new Minimatch(pattern, options || {}).makeRe() +} + +Minimatch.prototype.makeRe = makeRe +function makeRe () { + if (this.regexp || this.regexp === false) return this.regexp + + // at this point, this.set is a 2d array of partial + // pattern strings, or "**". + // + // It's better to use .match(). This function shouldn't + // be used, really, but it's pretty convenient sometimes, + // when you just want to work with a regex. + var set = this.set + + if (!set.length) { + this.regexp = false + return this.regexp + } + var options = this.options + + var twoStar = options.noglobstar ? star + : options.dot ? twoStarDot + : twoStarNoDot + var flags = options.nocase ? 'i' : '' + + var re = set.map(function (pattern) { + return pattern.map(function (p) { + return (p === GLOBSTAR) ? twoStar + : (typeof p === 'string') ? regExpEscape(p) + : p._src + }).join('\\\/') + }).join('|') + + // must match entire pattern + // ending in a * or ** will make it less strict. + re = '^(?:' + re + ')$' + + // can match anything, as long as it's not this. + if (this.negate) re = '^(?!' + re + ').*$' + + try { + this.regexp = new RegExp(re, flags) + } catch (ex) { + this.regexp = false + } + return this.regexp +} + +minimatch.match = function (list, pattern, options) { + options = options || {} + var mm = new Minimatch(pattern, options) + list = list.filter(function (f) { + return mm.match(f) + }) + if (mm.options.nonull && !list.length) { + list.push(pattern) + } + return list +} + +Minimatch.prototype.match = match +function match (f, partial) { + this.debug('match', f, this.pattern) + // short-circuit in the case of busted things. + // comments, etc. + if (this.comment) return false + if (this.empty) return f === '' + + if (f === '/' && partial) return true + + var options = this.options + + // windows: need to use /, not \ + if (path.sep !== '/') { + f = f.split(path.sep).join('/') + } + + // treat the test path as a set of pathparts. 
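+  // [Editor's note, illustrative and not part of upstream minimatch]
+  // e.g. '/usr/lib/node.js'.split(slashSplit) -> ['', 'usr', 'lib', 'node.js'].
+  // With options.matchBase and a slash-free pattern such as '*.js', only the
+  // basename 'node.js' (located below) is tested against the pattern.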
+ f = f.split(slashSplit) + this.debug(this.pattern, 'split', f) + + // just ONE of the pattern sets in this.set needs to match + // in order for it to be valid. If negating, then just one + // match means that we have failed. + // Either way, return on the first hit. + + var set = this.set + this.debug(this.pattern, 'set', set) + + // Find the basename of the path by looking for the last non-empty segment + var filename + var i + for (i = f.length - 1; i >= 0; i--) { + filename = f[i] + if (filename) break + } + + for (i = 0; i < set.length; i++) { + var pattern = set[i] + var file = f + if (options.matchBase && pattern.length === 1) { + file = [filename] + } + var hit = this.matchOne(file, pattern, partial) + if (hit) { + if (options.flipNegate) return true + return !this.negate + } + } + + // didn't get any hits. this is success if it's a negative + // pattern, failure otherwise. + if (options.flipNegate) return false + return this.negate +} + +// set partial to true to test if, for example, +// "/a/b" matches the start of "/*/b/*/d" +// Partial means, if you run out of file before you run +// out of pattern, then that's fine, as long as all +// the parts match. +Minimatch.prototype.matchOne = function (file, pattern, partial) { + var options = this.options + + this.debug('matchOne', + { 'this': this, file: file, pattern: pattern }) + + this.debug('matchOne', file.length, pattern.length) + + for (var fi = 0, + pi = 0, + fl = file.length, + pl = pattern.length + ; (fi < fl) && (pi < pl) + ; fi++, pi++) { + this.debug('matchOne loop') + var p = pattern[pi] + var f = file[fi] + + this.debug(pattern, p, f) + + // should be impossible. + // some invalid regexp stuff in the set. + if (p === false) return false + + if (p === GLOBSTAR) { + this.debug('GLOBSTAR', [pattern, p, f]) + + // "**" + // a/**/b/**/c would match the following: + // a/b/x/y/z/c + // a/x/y/z/b/c + // a/b/x/b/x/c + // a/b/c + // To do this, take the rest of the pattern after + // the **, and see if it would match the file remainder. + // If so, return success. + // If not, the ** "swallows" a segment, and try again. + // This is recursively awful. + // + // a/**/b/**/c matching a/b/x/y/z/c + // - a matches a + // - doublestar + // - matchOne(b/x/y/z/c, b/**/c) + // - b matches b + // - doublestar + // - matchOne(x/y/z/c, c) -> no + // - matchOne(y/z/c, c) -> no + // - matchOne(z/c, c) -> no + // - matchOne(c, c) yes, hit + var fr = fi + var pr = pi + 1 + if (pr === pl) { + this.debug('** at the end') + // a ** at the end will just swallow the rest. + // We have found a match. + // however, it will not swallow /.x, unless + // options.dot is set. + // . and .. are *never* matched by **, for explosively + // exponential reasons. + for (; fi < fl; fi++) { + if (file[fi] === '.' || file[fi] === '..' || + (!options.dot && file[fi].charAt(0) === '.')) return false + } + return true + } + + // ok, let's see if we can swallow whatever we can. + while (fr < fl) { + var swallowee = file[fr] + + this.debug('\nglobstar while', file, fr, pattern, pr, swallowee) + + // XXX remove this slice. Just pass the start index. + if (this.matchOne(file.slice(fr), pattern.slice(pr), partial)) { + this.debug('globstar found match!', fr, fl, swallowee) + // found a match. + return true + } else { + // can't swallow "." or ".." ever. + // can only swallow ".foo" when explicitly asked. + if (swallowee === '.' || swallowee === '..' 
||
+            (!options.dot && swallowee.charAt(0) === '.')) {
+            this.debug('dot detected!', file, fr, pattern, pr)
+            break
+          }
+
+          // ** swallows a segment, and we continue.
+          this.debug('globstar swallow a segment, and continue')
+          fr++
+        }
+      }
+
+      // no match was found.
+      // However, in partial mode, we can't say this is necessarily over.
+      // If there's more *pattern* left, then it could still match.
+      if (partial) {
+        // ran out of file
+        this.debug('\n>>> no match, partial?', file, fr, pattern, pr)
+        if (fr === fl) return true
+      }
+      return false
+    }
+
+    // something other than **
+    // non-magic patterns just have to match exactly
+    // patterns with magic have been turned into regexps.
+    var hit
+    if (typeof p === 'string') {
+      if (options.nocase) {
+        hit = f.toLowerCase() === p.toLowerCase()
+      } else {
+        hit = f === p
+      }
+      this.debug('string match', p, f, hit)
+    } else {
+      hit = f.match(p)
+      this.debug('pattern match', p, f, hit)
+    }
+
+    if (!hit) return false
+  }
+
+  // Note: ending in / means that we'll get a final ""
+  // at the end of the pattern. This can only match a
+  // corresponding "" at the end of the file.
+  // If the file ends in /, then it can only match a
+  // pattern that ends in /, unless the pattern just
+  // doesn't have any more for it. But, a/b/ should *not*
+  // match "a/b/*", even though "" matches against the
+  // [^/]*? pattern, except in partial mode, where it might
+  // simply not be reached yet.
+  // However, a/b/ should still satisfy a/*
+
+  // now either we fell off the end of the pattern, or we're done.
+  if (fi === fl && pi === pl) {
+    // ran out of pattern and filename at the same time.
+    // an exact hit!
+    return true
+  } else if (fi === fl) {
+    // ran out of file, but still had pattern left.
+    // this is ok if we're doing the match as part of
+    // a glob fs traversal.
+    return partial
+  } else if (pi === pl) {
+    // ran out of pattern, still have file left.
+    // this is only acceptable if we're on the very last
+    // empty segment of a file with a trailing slash.
+    // a/* should match a/b/
+    var emptyFileEnd = (fi === fl - 1) && (file[fi] === '')
+    return emptyFileEnd
+  }
+
+  // should be unreachable.
+  throw new Error('wtf?')
+}
+
+// replace stuff like \* with *
+function globUnescape (s) {
+  return s.replace(/\\(.)/g, '$1')
+}
+
+function regExpEscape (s) {
+  return s.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&')
+}
+
+},{"brace-expansion":11,"path":22}],21:[function(require,module,exports){
+var wrappy = require('wrappy')
+module.exports = wrappy(once)
+module.exports.strict = wrappy(onceStrict)
+
+once.proto = once(function () {
+  Object.defineProperty(Function.prototype, 'once', {
+    value: function () {
+      return once(this)
+    },
+    configurable: true
+  })
+
+  Object.defineProperty(Function.prototype, 'onceStrict', {
+    value: function () {
+      return onceStrict(this)
+    },
+    configurable: true
+  })
+})
+
+function once (fn) {
+  var f = function () {
+    if (f.called) return f.value
+    f.called = true
+    return f.value = fn.apply(this, arguments)
+  }
+  f.called = false
+  return f
+}
+
+function onceStrict (fn) {
+  var f = function () {
+    if (f.called)
+      throw new Error(f.onceError)
+    f.called = true
+    return f.value = fn.apply(this, arguments)
+  }
+  var name = fn.name || 'Function wrapped with `once`'
+  f.onceError = name + " shouldn't be called more than once"
+  f.called = false
+  return f
+}
+
+},{"wrappy":29}],22:[function(require,module,exports){
+(function (process){
+// Copyright Joyent, Inc. and other Node contributors.
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +// resolves . and .. elements in a path array with directory names there +// must be no slashes, empty elements, or device names (c:\) in the array +// (so also no leading and trailing slashes - it does not distinguish +// relative and absolute paths) +function normalizeArray(parts, allowAboveRoot) { + // if the path tries to go above the root, `up` ends up > 0 + var up = 0; + for (var i = parts.length - 1; i >= 0; i--) { + var last = parts[i]; + if (last === '.') { + parts.splice(i, 1); + } else if (last === '..') { + parts.splice(i, 1); + up++; + } else if (up) { + parts.splice(i, 1); + up--; + } + } + + // if the path is allowed to go above the root, restore leading ..s + if (allowAboveRoot) { + for (; up--; up) { + parts.unshift('..'); + } + } + + return parts; +} + +// Split a filename into [root, dir, basename, ext], unix version +// 'root' is just a slash, or nothing. +var splitPathRe = + /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/; +var splitPath = function(filename) { + return splitPathRe.exec(filename).slice(1); +}; + +// path.resolve([from ...], to) +// posix version +exports.resolve = function() { + var resolvedPath = '', + resolvedAbsolute = false; + + for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) { + var path = (i >= 0) ? arguments[i] : process.cwd(); + + // Skip empty and invalid entries + if (typeof path !== 'string') { + throw new TypeError('Arguments to path.resolve must be strings'); + } else if (!path) { + continue; + } + + resolvedPath = path + '/' + resolvedPath; + resolvedAbsolute = path.charAt(0) === '/'; + } + + // At this point the path should be resolved to a full absolute path, but + // handle relative paths to be safe (might happen when process.cwd() fails) + + // Normalize the path + resolvedPath = normalizeArray(filter(resolvedPath.split('/'), function(p) { + return !!p; + }), !resolvedAbsolute).join('/'); + + return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.'; +}; + +// path.normalize(path) +// posix version +exports.normalize = function(path) { + var isAbsolute = exports.isAbsolute(path), + trailingSlash = substr(path, -1) === '/'; + + // Normalize the path + path = normalizeArray(filter(path.split('/'), function(p) { + return !!p; + }), !isAbsolute).join('/'); + + if (!path && !isAbsolute) { + path = '.'; + } + if (path && trailingSlash) { + path += '/'; + } + + return (isAbsolute ? 
'/' : '') + path; +}; + +// posix version +exports.isAbsolute = function(path) { + return path.charAt(0) === '/'; +}; + +// posix version +exports.join = function() { + var paths = Array.prototype.slice.call(arguments, 0); + return exports.normalize(filter(paths, function(p, index) { + if (typeof p !== 'string') { + throw new TypeError('Arguments to path.join must be strings'); + } + return p; + }).join('/')); +}; + + +// path.relative(from, to) +// posix version +exports.relative = function(from, to) { + from = exports.resolve(from).substr(1); + to = exports.resolve(to).substr(1); + + function trim(arr) { + var start = 0; + for (; start < arr.length; start++) { + if (arr[start] !== '') break; + } + + var end = arr.length - 1; + for (; end >= 0; end--) { + if (arr[end] !== '') break; + } + + if (start > end) return []; + return arr.slice(start, end - start + 1); + } + + var fromParts = trim(from.split('/')); + var toParts = trim(to.split('/')); + + var length = Math.min(fromParts.length, toParts.length); + var samePartsLength = length; + for (var i = 0; i < length; i++) { + if (fromParts[i] !== toParts[i]) { + samePartsLength = i; + break; + } + } + + var outputParts = []; + for (var i = samePartsLength; i < fromParts.length; i++) { + outputParts.push('..'); + } + + outputParts = outputParts.concat(toParts.slice(samePartsLength)); + + return outputParts.join('/'); +}; + +exports.sep = '/'; +exports.delimiter = ':'; + +exports.dirname = function(path) { + var result = splitPath(path), + root = result[0], + dir = result[1]; + + if (!root && !dir) { + // No dirname whatsoever + return '.'; + } + + if (dir) { + // It has a dirname, strip trailing slash + dir = dir.substr(0, dir.length - 1); + } + + return root + dir; +}; + + +exports.basename = function(path, ext) { + var f = splitPath(path)[2]; + // TODO: make this comparison case-insensitive on windows? + if (ext && f.substr(-1 * ext.length) === ext) { + f = f.substr(0, f.length - ext.length); + } + return f; +}; + + +exports.extname = function(path) { + return splitPath(path)[3]; +}; + +function filter (xs, f) { + if (xs.filter) return xs.filter(f); + var res = []; + for (var i = 0; i < xs.length; i++) { + if (f(xs[i], i, xs)) res.push(xs[i]); + } + return res; +} + +// String.prototype.substr - negative index don't work in IE8 +var substr = 'ab'.substr(-1) === 'b' + ? function (str, start, len) { return str.substr(start, len) } + : function (str, start, len) { + if (start < 0) start = str.length + start; + return str.substr(start, len); + } +; + +}).call(this,require('_process')) +},{"_process":24}],23:[function(require,module,exports){ +(function (process){ +'use strict'; + +function posix(path) { + return path.charAt(0) === '/'; +} + +function win32(path) { + // https://github.com/nodejs/node/blob/b3fcc245fb25539909ef1d5eaa01dbf92e168633/lib/path.js#L56 + var splitDeviceRe = /^([a-zA-Z]:|[\\\/]{2}[^\\\/]+[\\\/]+[^\\\/]+)?([\\\/])?([\s\S]*?)$/; + var result = splitDeviceRe.exec(path); + var device = result[1] || ''; + var isUnc = Boolean(device && device.charAt(1) !== ':'); + + // UNC paths are always absolute + return Boolean(result[2] || isUnc); +} + +module.exports = process.platform === 'win32' ? 
win32 : posix;
+module.exports.posix = posix;
+module.exports.win32 = win32;
+
+}).call(this,require('_process'))
+},{"_process":24}],24:[function(require,module,exports){
+// shim for using process in browser
+var process = module.exports = {};
+
+// cached from whatever global is present so that test runners that stub it
+// don't break things. But we need to wrap it in a try catch in case it is
+// wrapped in strict mode code which doesn't define any globals. It's inside a
+// function because try/catches deoptimize in certain engines.
+
+var cachedSetTimeout;
+var cachedClearTimeout;
+
+function defaultSetTimout() {
+    throw new Error('setTimeout has not been defined');
+}
+function defaultClearTimeout () {
+    throw new Error('clearTimeout has not been defined');
+}
+(function () {
+    try {
+        if (typeof setTimeout === 'function') {
+            cachedSetTimeout = setTimeout;
+        } else {
+            cachedSetTimeout = defaultSetTimout;
+        }
+    } catch (e) {
+        cachedSetTimeout = defaultSetTimout;
+    }
+    try {
+        if (typeof clearTimeout === 'function') {
+            cachedClearTimeout = clearTimeout;
+        } else {
+            cachedClearTimeout = defaultClearTimeout;
+        }
+    } catch (e) {
+        cachedClearTimeout = defaultClearTimeout;
+    }
+} ())
+function runTimeout(fun) {
+    if (cachedSetTimeout === setTimeout) {
+        // normal environments in sane situations
+        return setTimeout(fun, 0);
+    }
+    // if setTimeout wasn't available but was later defined
+    if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) {
+        cachedSetTimeout = setTimeout;
+        return setTimeout(fun, 0);
+    }
+    try {
+        // when somebody has screwed with setTimeout but no I.E. madness
+        return cachedSetTimeout(fun, 0);
+    } catch(e){
+        try {
+            // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally
+            return cachedSetTimeout.call(null, fun, 0);
+        } catch(e){
+            // same as above but when it's a version of I.E. that must have the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error
+            return cachedSetTimeout.call(this, fun, 0);
+        }
+    }
+
+
+}
+function runClearTimeout(marker) {
+    if (cachedClearTimeout === clearTimeout) {
+        // normal environments in sane situations
+        return clearTimeout(marker);
+    }
+    // if clearTimeout wasn't available but was later defined
+    if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) {
+        cachedClearTimeout = clearTimeout;
+        return clearTimeout(marker);
+    }
+    try {
+        // when somebody has screwed with setTimeout but no I.E. madness
+        return cachedClearTimeout(marker);
+    } catch (e){
+        try {
+            // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally
+            return cachedClearTimeout.call(null, marker);
+        } catch (e){
+            // same as above but when it's a version of I.E. that must have the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error.
+            // Some versions of I.E. have different rules for clearTimeout vs setTimeout
+            return cachedClearTimeout.call(this, marker);
+        }
+    }
+
+
+
+}
+var queue = [];
+var draining = false;
+var currentQueue;
+var queueIndex = -1;
+
+function cleanUpNextTick() {
+    if (!draining || !currentQueue) {
+        return;
+    }
+    draining = false;
+    if (currentQueue.length) {
+        queue = currentQueue.concat(queue);
+    } else {
+        queueIndex = -1;
+    }
+    if (queue.length) {
+        drainQueue();
+    }
+}
+
+function drainQueue() {
+    if (draining) {
+        return;
+    }
+    var timeout = runTimeout(cleanUpNextTick);
+    draining = true;
+
+    var len = queue.length;
+    while(len) {
+        currentQueue = queue;
+        queue = [];
+        while (++queueIndex < len) {
+            if (currentQueue) {
+                currentQueue[queueIndex].run();
+            }
+        }
+        queueIndex = -1;
+        len = queue.length;
+    }
+    currentQueue = null;
+    draining = false;
+    runClearTimeout(timeout);
+}
+
+process.nextTick = function (fun) {
+    var args = new Array(arguments.length - 1);
+    if (arguments.length > 1) {
+        for (var i = 1; i < arguments.length; i++) {
+            args[i - 1] = arguments[i];
+        }
+    }
+    queue.push(new Item(fun, args));
+    if (queue.length === 1 && !draining) {
+        runTimeout(drainQueue);
+    }
+};
+
+// v8 likes predictable objects
+function Item(fun, array) {
+    this.fun = fun;
+    this.array = array;
+}
+Item.prototype.run = function () {
+    this.fun.apply(null, this.array);
+};
+process.title = 'browser';
+process.browser = true;
+process.env = {};
+process.argv = [];
+process.version = ''; // empty string to avoid regexp issues
+process.versions = {};
+
+function noop() {}
+
+process.on = noop;
+process.addListener = noop;
+process.once = noop;
+process.off = noop;
+process.removeListener = noop;
+process.removeAllListeners = noop;
+process.emit = noop;
+process.prependListener = noop;
+process.prependOnceListener = noop;
+
+process.listeners = function (name) { return [] }
+
+process.binding = function (name) {
+    throw new Error('process.binding is not supported');
+};
+
+process.cwd = function () { return '/' };
+process.chdir = function (dir) {
+    throw new Error('process.chdir is not supported');
+};
+process.umask = function() { return 0; };
+
+},{}],25:[function(require,module,exports){
+// Underscore.js 1.8.3
+// http://underscorejs.org
+// (c) 2009-2015 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
+// Underscore may be freely distributed under the MIT license.
+
+(function() {
+
+  // Baseline setup
+  // --------------
+
+  // Establish the root object, `window` in the browser, or `exports` on the server.
+  var root = this;
+
+  // Save the previous value of the `_` variable.
+  var previousUnderscore = root._;
+
+  // Save bytes in the minified (but not gzipped) version:
+  var ArrayProto = Array.prototype, ObjProto = Object.prototype, FuncProto = Function.prototype;
+
+  // Create quick reference variables for speedy access to core prototypes.
+  var
+    push = ArrayProto.push,
+    slice = ArrayProto.slice,
+    toString = ObjProto.toString,
+    hasOwnProperty = ObjProto.hasOwnProperty;
+
+  // All **ECMAScript 5** native function implementations that we hope to use
+  // are declared here.
+  var
+    nativeIsArray = Array.isArray,
+    nativeKeys = Object.keys,
+    nativeBind = FuncProto.bind,
+    nativeCreate = Object.create;
+
+  // Naked function reference for surrogate-prototype-swapping.
+  var Ctor = function(){};
+
+  // Create a safe reference to the Underscore object for use below.
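+  // [Editor's note, illustrative and not part of upstream Underscore]
+  // The `_` defined just below is both a namespace and a wrapper: calling
+  // _.map([1, 2], f) uses it as a plain function library, while _([1, 2])
+  // returns a wrapped instance whose methods can be chained.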
+ var _ = function(obj) { + if (obj instanceof _) return obj; + if (!(this instanceof _)) return new _(obj); + this._wrapped = obj; + }; + + // Export the Underscore object for **Node.js**, with + // backwards-compatibility for the old `require()` API. If we're in + // the browser, add `_` as a global object. + if (typeof exports !== 'undefined') { + if (typeof module !== 'undefined' && module.exports) { + exports = module.exports = _; + } + exports._ = _; + } else { + root._ = _; + } + + // Current version. + _.VERSION = '1.8.3'; + + // Internal function that returns an efficient (for current engines) version + // of the passed-in callback, to be repeatedly applied in other Underscore + // functions. + var optimizeCb = function(func, context, argCount) { + if (context === void 0) return func; + switch (argCount == null ? 3 : argCount) { + case 1: return function(value) { + return func.call(context, value); + }; + case 2: return function(value, other) { + return func.call(context, value, other); + }; + case 3: return function(value, index, collection) { + return func.call(context, value, index, collection); + }; + case 4: return function(accumulator, value, index, collection) { + return func.call(context, accumulator, value, index, collection); + }; + } + return function() { + return func.apply(context, arguments); + }; + }; + + // A mostly-internal function to generate callbacks that can be applied + // to each element in a collection, returning the desired result — either + // identity, an arbitrary callback, a property matcher, or a property accessor. + var cb = function(value, context, argCount) { + if (value == null) return _.identity; + if (_.isFunction(value)) return optimizeCb(value, context, argCount); + if (_.isObject(value)) return _.matcher(value); + return _.property(value); + }; + _.iteratee = function(value, context) { + return cb(value, context, Infinity); + }; + + // An internal function for creating assigner functions. + var createAssigner = function(keysFunc, undefinedOnly) { + return function(obj) { + var length = arguments.length; + if (length < 2 || obj == null) return obj; + for (var index = 1; index < length; index++) { + var source = arguments[index], + keys = keysFunc(source), + l = keys.length; + for (var i = 0; i < l; i++) { + var key = keys[i]; + if (!undefinedOnly || obj[key] === void 0) obj[key] = source[key]; + } + } + return obj; + }; + }; + + // An internal function for creating a new object that inherits from another. + var baseCreate = function(prototype) { + if (!_.isObject(prototype)) return {}; + if (nativeCreate) return nativeCreate(prototype); + Ctor.prototype = prototype; + var result = new Ctor; + Ctor.prototype = null; + return result; + }; + + var property = function(key) { + return function(obj) { + return obj == null ? void 0 : obj[key]; + }; + }; + + // Helper for collection methods to determine whether a collection + // should be iterated as an array or as an object + // Related: http://people.mozilla.org/~jorendorff/es6-draft.html#sec-tolength + // Avoids a very nasty iOS 8 JIT bug on ARM-64. #2094 + var MAX_ARRAY_INDEX = Math.pow(2, 53) - 1; + var getLength = property('length'); + var isArrayLike = function(collection) { + var length = getLength(collection); + return typeof length == 'number' && length >= 0 && length <= MAX_ARRAY_INDEX; + }; + + // Collection Functions + // -------------------- + + // The cornerstone, an `each` implementation, aka `forEach`. + // Handles raw objects in addition to array-likes. 
Treats all
+  // sparse array-likes as if they were dense.
+  _.each = _.forEach = function(obj, iteratee, context) {
+    iteratee = optimizeCb(iteratee, context);
+    var i, length;
+    if (isArrayLike(obj)) {
+      for (i = 0, length = obj.length; i < length; i++) {
+        iteratee(obj[i], i, obj);
+      }
+    } else {
+      var keys = _.keys(obj);
+      for (i = 0, length = keys.length; i < length; i++) {
+        iteratee(obj[keys[i]], keys[i], obj);
+      }
+    }
+    return obj;
+  };
+
+  // Return the results of applying the iteratee to each element.
+  _.map = _.collect = function(obj, iteratee, context) {
+    iteratee = cb(iteratee, context);
+    var keys = !isArrayLike(obj) && _.keys(obj),
+        length = (keys || obj).length,
+        results = Array(length);
+    for (var index = 0; index < length; index++) {
+      var currentKey = keys ? keys[index] : index;
+      results[index] = iteratee(obj[currentKey], currentKey, obj);
+    }
+    return results;
+  };
+
+  // Create a reducing function iterating left or right.
+  function createReduce(dir) {
+    // Optimized iterator function: using arguments.length in the main
+    // function would deoptimize it; see #1991.
+    function iterator(obj, iteratee, memo, keys, index, length) {
+      for (; index >= 0 && index < length; index += dir) {
+        var currentKey = keys ? keys[index] : index;
+        memo = iteratee(memo, obj[currentKey], currentKey, obj);
+      }
+      return memo;
+    }
+
+    return function(obj, iteratee, memo, context) {
+      iteratee = optimizeCb(iteratee, context, 4);
+      var keys = !isArrayLike(obj) && _.keys(obj),
+          length = (keys || obj).length,
+          index = dir > 0 ? 0 : length - 1;
+      // Determine the initial value if none is provided.
+      if (arguments.length < 3) {
+        memo = obj[keys ? keys[index] : index];
+        index += dir;
+      }
+      return iterator(obj, iteratee, memo, keys, index, length);
+    };
+  }
+
+  // **Reduce** builds up a single result from a list of values, aka `inject`,
+  // or `foldl`.
+  _.reduce = _.foldl = _.inject = createReduce(1);
+
+  // The right-associative version of reduce, also known as `foldr`.
+  _.reduceRight = _.foldr = createReduce(-1);
+
+  // Return the first value which passes a truth test. Aliased as `detect`.
+  _.find = _.detect = function(obj, predicate, context) {
+    var key;
+    if (isArrayLike(obj)) {
+      key = _.findIndex(obj, predicate, context);
+    } else {
+      key = _.findKey(obj, predicate, context);
+    }
+    if (key !== void 0 && key !== -1) return obj[key];
+  };
+
+  // Return all the elements that pass a truth test.
+  // Aliased as `select`.
+  _.filter = _.select = function(obj, predicate, context) {
+    var results = [];
+    predicate = cb(predicate, context);
+    _.each(obj, function(value, index, list) {
+      if (predicate(value, index, list)) results.push(value);
+    });
+    return results;
+  };
+
+  // Return all the elements for which a truth test fails.
+  _.reject = function(obj, predicate, context) {
+    return _.filter(obj, _.negate(cb(predicate)), context);
+  };
+
+  // Determine whether all of the elements match a truth test.
+  // Aliased as `all`.
+  _.every = _.all = function(obj, predicate, context) {
+    predicate = cb(predicate, context);
+    var keys = !isArrayLike(obj) && _.keys(obj),
+        length = (keys || obj).length;
+    for (var index = 0; index < length; index++) {
+      var currentKey = keys ? keys[index] : index;
+      if (!predicate(obj[currentKey], currentKey, obj)) return false;
+    }
+    return true;
+  };
+
+  // Determine if at least one element in the object matches a truth test.
+  // Aliased as `any`.
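+  // [Editor's sketch, not part of upstream Underscore]
+  // e.g. _.some([1, 2, 3], function(x) { return x > 2; })  -> true
+  //      _.every([1, 2, 3], function(x) { return x > 2; }) -> false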
+ _.some = _.any = function(obj, predicate, context) { + predicate = cb(predicate, context); + var keys = !isArrayLike(obj) && _.keys(obj), + length = (keys || obj).length; + for (var index = 0; index < length; index++) { + var currentKey = keys ? keys[index] : index; + if (predicate(obj[currentKey], currentKey, obj)) return true; + } + return false; + }; + + // Determine if the array or object contains a given item (using `===`). + // Aliased as `includes` and `include`. + _.contains = _.includes = _.include = function(obj, item, fromIndex, guard) { + if (!isArrayLike(obj)) obj = _.values(obj); + if (typeof fromIndex != 'number' || guard) fromIndex = 0; + return _.indexOf(obj, item, fromIndex) >= 0; + }; + + // Invoke a method (with arguments) on every item in a collection. + _.invoke = function(obj, method) { + var args = slice.call(arguments, 2); + var isFunc = _.isFunction(method); + return _.map(obj, function(value) { + var func = isFunc ? method : value[method]; + return func == null ? func : func.apply(value, args); + }); + }; + + // Convenience version of a common use case of `map`: fetching a property. + _.pluck = function(obj, key) { + return _.map(obj, _.property(key)); + }; + + // Convenience version of a common use case of `filter`: selecting only objects + // containing specific `key:value` pairs. + _.where = function(obj, attrs) { + return _.filter(obj, _.matcher(attrs)); + }; + + // Convenience version of a common use case of `find`: getting the first object + // containing specific `key:value` pairs. + _.findWhere = function(obj, attrs) { + return _.find(obj, _.matcher(attrs)); + }; + + // Return the maximum element (or element-based computation). + _.max = function(obj, iteratee, context) { + var result = -Infinity, lastComputed = -Infinity, + value, computed; + if (iteratee == null && obj != null) { + obj = isArrayLike(obj) ? obj : _.values(obj); + for (var i = 0, length = obj.length; i < length; i++) { + value = obj[i]; + if (value > result) { + result = value; + } + } + } else { + iteratee = cb(iteratee, context); + _.each(obj, function(value, index, list) { + computed = iteratee(value, index, list); + if (computed > lastComputed || computed === -Infinity && result === -Infinity) { + result = value; + lastComputed = computed; + } + }); + } + return result; + }; + + // Return the minimum element (or element-based computation). + _.min = function(obj, iteratee, context) { + var result = Infinity, lastComputed = Infinity, + value, computed; + if (iteratee == null && obj != null) { + obj = isArrayLike(obj) ? obj : _.values(obj); + for (var i = 0, length = obj.length; i < length; i++) { + value = obj[i]; + if (value < result) { + result = value; + } + } + } else { + iteratee = cb(iteratee, context); + _.each(obj, function(value, index, list) { + computed = iteratee(value, index, list); + if (computed < lastComputed || computed === Infinity && result === Infinity) { + result = value; + lastComputed = computed; + } + }); + } + return result; + }; + + // Shuffle a collection, using the modern version of the + // [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher–Yates_shuffle). + _.shuffle = function(obj) { + var set = isArrayLike(obj) ? obj : _.values(obj); + var length = set.length; + var shuffled = Array(length); + for (var index = 0, rand; index < length; index++) { + rand = _.random(0, index); + if (rand !== index) shuffled[index] = shuffled[rand]; + shuffled[rand] = set[index]; + } + return shuffled; + }; + + // Sample **n** random values from a collection. 
+ // If **n** is not specified, returns a single random element. + // The internal `guard` argument allows it to work with `map`. + _.sample = function(obj, n, guard) { + if (n == null || guard) { + if (!isArrayLike(obj)) obj = _.values(obj); + return obj[_.random(obj.length - 1)]; + } + return _.shuffle(obj).slice(0, Math.max(0, n)); + }; + + // Sort the object's values by a criterion produced by an iteratee. + _.sortBy = function(obj, iteratee, context) { + iteratee = cb(iteratee, context); + return _.pluck(_.map(obj, function(value, index, list) { + return { + value: value, + index: index, + criteria: iteratee(value, index, list) + }; + }).sort(function(left, right) { + var a = left.criteria; + var b = right.criteria; + if (a !== b) { + if (a > b || a === void 0) return 1; + if (a < b || b === void 0) return -1; + } + return left.index - right.index; + }), 'value'); + }; + + // An internal function used for aggregate "group by" operations. + var group = function(behavior) { + return function(obj, iteratee, context) { + var result = {}; + iteratee = cb(iteratee, context); + _.each(obj, function(value, index) { + var key = iteratee(value, index, obj); + behavior(result, value, key); + }); + return result; + }; + }; + + // Groups the object's values by a criterion. Pass either a string attribute + // to group by, or a function that returns the criterion. + _.groupBy = group(function(result, value, key) { + if (_.has(result, key)) result[key].push(value); else result[key] = [value]; + }); + + // Indexes the object's values by a criterion, similar to `groupBy`, but for + // when you know that your index values will be unique. + _.indexBy = group(function(result, value, key) { + result[key] = value; + }); + + // Counts instances of an object that group by a certain criterion. Pass + // either a string attribute to count by, or a function that returns the + // criterion. + _.countBy = group(function(result, value, key) { + if (_.has(result, key)) result[key]++; else result[key] = 1; + }); + + // Safely create a real, live array from anything iterable. + _.toArray = function(obj) { + if (!obj) return []; + if (_.isArray(obj)) return slice.call(obj); + if (isArrayLike(obj)) return _.map(obj, _.identity); + return _.values(obj); + }; + + // Return the number of elements in an object. + _.size = function(obj) { + if (obj == null) return 0; + return isArrayLike(obj) ? obj.length : _.keys(obj).length; + }; + + // Split a collection into two arrays: one whose elements all satisfy the given + // predicate, and one whose elements all do not satisfy the predicate. + _.partition = function(obj, predicate, context) { + predicate = cb(predicate, context); + var pass = [], fail = []; + _.each(obj, function(value, key, obj) { + (predicate(value, key, obj) ? pass : fail).push(value); + }); + return [pass, fail]; + }; + + // Array Functions + // --------------- + + // Get the first element of an array. Passing **n** will return the first N + // values in the array. Aliased as `head` and `take`. The **guard** check + // allows it to work with `_.map`. + _.first = _.head = _.take = function(array, n, guard) { + if (array == null) return void 0; + if (n == null || guard) return array[0]; + return _.initial(array, array.length - n); + }; + + // Returns everything but the last entry of the array. Especially useful on + // the arguments object. Passing **n** will return all the values in + // the array, excluding the last N. 
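+  // [Editor's sketch, not part of upstream Underscore]
+  // e.g. _.initial([1, 2, 3])    -> [1, 2]
+  //      _.initial([1, 2, 3], 2) -> [1]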
+ _.initial = function(array, n, guard) { + return slice.call(array, 0, Math.max(0, array.length - (n == null || guard ? 1 : n))); + }; + + // Get the last element of an array. Passing **n** will return the last N + // values in the array. + _.last = function(array, n, guard) { + if (array == null) return void 0; + if (n == null || guard) return array[array.length - 1]; + return _.rest(array, Math.max(0, array.length - n)); + }; + + // Returns everything but the first entry of the array. Aliased as `tail` and `drop`. + // Especially useful on the arguments object. Passing an **n** will return + // the rest N values in the array. + _.rest = _.tail = _.drop = function(array, n, guard) { + return slice.call(array, n == null || guard ? 1 : n); + }; + + // Trim out all falsy values from an array. + _.compact = function(array) { + return _.filter(array, _.identity); + }; + + // Internal implementation of a recursive `flatten` function. + var flatten = function(input, shallow, strict, startIndex) { + var output = [], idx = 0; + for (var i = startIndex || 0, length = getLength(input); i < length; i++) { + var value = input[i]; + if (isArrayLike(value) && (_.isArray(value) || _.isArguments(value))) { + //flatten current level of array or arguments object + if (!shallow) value = flatten(value, shallow, strict); + var j = 0, len = value.length; + output.length += len; + while (j < len) { + output[idx++] = value[j++]; + } + } else if (!strict) { + output[idx++] = value; + } + } + return output; + }; + + // Flatten out an array, either recursively (by default), or just one level. + _.flatten = function(array, shallow) { + return flatten(array, shallow, false); + }; + + // Return a version of the array that does not contain the specified value(s). + _.without = function(array) { + return _.difference(array, slice.call(arguments, 1)); + }; + + // Produce a duplicate-free version of the array. If the array has already + // been sorted, you have the option of using a faster algorithm. + // Aliased as `unique`. + _.uniq = _.unique = function(array, isSorted, iteratee, context) { + if (!_.isBoolean(isSorted)) { + context = iteratee; + iteratee = isSorted; + isSorted = false; + } + if (iteratee != null) iteratee = cb(iteratee, context); + var result = []; + var seen = []; + for (var i = 0, length = getLength(array); i < length; i++) { + var value = array[i], + computed = iteratee ? iteratee(value, i, array) : value; + if (isSorted) { + if (!i || seen !== computed) result.push(value); + seen = computed; + } else if (iteratee) { + if (!_.contains(seen, computed)) { + seen.push(computed); + result.push(value); + } + } else if (!_.contains(result, value)) { + result.push(value); + } + } + return result; + }; + + // Produce an array that contains the union: each distinct element from all of + // the passed-in arrays. + _.union = function() { + return _.uniq(flatten(arguments, true, true)); + }; + + // Produce an array that contains every item shared between all the + // passed-in arrays. + _.intersection = function(array) { + var result = []; + var argsLength = arguments.length; + for (var i = 0, length = getLength(array); i < length; i++) { + var item = array[i]; + if (_.contains(result, item)) continue; + for (var j = 1; j < argsLength; j++) { + if (!_.contains(arguments[j], item)) break; + } + if (j === argsLength) result.push(item); + } + return result; + }; + + // Take the difference between one array and a number of other arrays. + // Only the elements present in just the first array will remain. 
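+  // [Editor's sketch, not part of upstream Underscore]
+  // e.g. _.difference([1, 2, 3, 4], [2, 4], [3]) -> [1]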
+ _.difference = function(array) { + var rest = flatten(arguments, true, true, 1); + return _.filter(array, function(value){ + return !_.contains(rest, value); + }); + }; + + // Zip together multiple lists into a single array -- elements that share + // an index go together. + _.zip = function() { + return _.unzip(arguments); + }; + + // Complement of _.zip. Unzip accepts an array of arrays and groups + // each array's elements on shared indices + _.unzip = function(array) { + var length = array && _.max(array, getLength).length || 0; + var result = Array(length); + + for (var index = 0; index < length; index++) { + result[index] = _.pluck(array, index); + } + return result; + }; + + // Converts lists into objects. Pass either a single array of `[key, value]` + // pairs, or two parallel arrays of the same length -- one of keys, and one of + // the corresponding values. + _.object = function(list, values) { + var result = {}; + for (var i = 0, length = getLength(list); i < length; i++) { + if (values) { + result[list[i]] = values[i]; + } else { + result[list[i][0]] = list[i][1]; + } + } + return result; + }; + + // Generator function to create the findIndex and findLastIndex functions + function createPredicateIndexFinder(dir) { + return function(array, predicate, context) { + predicate = cb(predicate, context); + var length = getLength(array); + var index = dir > 0 ? 0 : length - 1; + for (; index >= 0 && index < length; index += dir) { + if (predicate(array[index], index, array)) return index; + } + return -1; + }; + } + + // Returns the first index on an array-like that passes a predicate test + _.findIndex = createPredicateIndexFinder(1); + _.findLastIndex = createPredicateIndexFinder(-1); + + // Use a comparator function to figure out the smallest index at which + // an object should be inserted so as to maintain order. Uses binary search. + _.sortedIndex = function(array, obj, iteratee, context) { + iteratee = cb(iteratee, context, 1); + var value = iteratee(obj); + var low = 0, high = getLength(array); + while (low < high) { + var mid = Math.floor((low + high) / 2); + if (iteratee(array[mid]) < value) low = mid + 1; else high = mid; + } + return low; + }; + + // Generator function to create the indexOf and lastIndexOf functions + function createIndexFinder(dir, predicateFind, sortedIndex) { + return function(array, item, idx) { + var i = 0, length = getLength(array); + if (typeof idx == 'number') { + if (dir > 0) { + i = idx >= 0 ? idx : Math.max(idx + length, i); + } else { + length = idx >= 0 ? Math.min(idx + 1, length) : idx + length + 1; + } + } else if (sortedIndex && idx && length) { + idx = sortedIndex(array, item); + return array[idx] === item ? idx : -1; + } + if (item !== item) { + idx = predicateFind(slice.call(array, i, length), _.isNaN); + return idx >= 0 ? idx + i : -1; + } + for (idx = dir > 0 ? i : length - 1; idx >= 0 && idx < length; idx += dir) { + if (array[idx] === item) return idx; + } + return -1; + }; + } + + // Return the position of the first occurrence of an item in an array, + // or -1 if the item is not included in the array. + // If the array is large and already in sort order, pass `true` + // for **isSorted** to use binary search. + _.indexOf = createIndexFinder(1, _.findIndex, _.sortedIndex); + _.lastIndexOf = createIndexFinder(-1, _.findLastIndex); + + // Generate an integer Array containing an arithmetic progression. A port of + // the native Python `range()` function. 
See + // [the Python documentation](http://docs.python.org/library/functions.html#range). + _.range = function(start, stop, step) { + if (stop == null) { + stop = start || 0; + start = 0; + } + step = step || 1; + + var length = Math.max(Math.ceil((stop - start) / step), 0); + var range = Array(length); + + for (var idx = 0; idx < length; idx++, start += step) { + range[idx] = start; + } + + return range; + }; + + // Function (ahem) Functions + // ------------------ + + // Determines whether to execute a function as a constructor + // or a normal function with the provided arguments + var executeBound = function(sourceFunc, boundFunc, context, callingContext, args) { + if (!(callingContext instanceof boundFunc)) return sourceFunc.apply(context, args); + var self = baseCreate(sourceFunc.prototype); + var result = sourceFunc.apply(self, args); + if (_.isObject(result)) return result; + return self; + }; + + // Create a function bound to a given object (assigning `this`, and arguments, + // optionally). Delegates to **ECMAScript 5**'s native `Function.bind` if + // available. + _.bind = function(func, context) { + if (nativeBind && func.bind === nativeBind) return nativeBind.apply(func, slice.call(arguments, 1)); + if (!_.isFunction(func)) throw new TypeError('Bind must be called on a function'); + var args = slice.call(arguments, 2); + var bound = function() { + return executeBound(func, bound, context, this, args.concat(slice.call(arguments))); + }; + return bound; + }; + + // Partially apply a function by creating a version that has had some of its + // arguments pre-filled, without changing its dynamic `this` context. _ acts + // as a placeholder, allowing any combination of arguments to be pre-filled. + _.partial = function(func) { + var boundArgs = slice.call(arguments, 1); + var bound = function() { + var position = 0, length = boundArgs.length; + var args = Array(length); + for (var i = 0; i < length; i++) { + args[i] = boundArgs[i] === _ ? arguments[position++] : boundArgs[i]; + } + while (position < arguments.length) args.push(arguments[position++]); + return executeBound(func, bound, this, this, args); + }; + return bound; + }; + + // Bind a number of an object's methods to that object. Remaining arguments + // are the method names to be bound. Useful for ensuring that all callbacks + // defined on an object belong to it. + _.bindAll = function(obj) { + var i, length = arguments.length, key; + if (length <= 1) throw new Error('bindAll must be passed function names'); + for (i = 1; i < length; i++) { + key = arguments[i]; + obj[key] = _.bind(obj[key], obj); + } + return obj; + }; + + // Memoize an expensive function by storing its results. + _.memoize = function(func, hasher) { + var memoize = function(key) { + var cache = memoize.cache; + var address = '' + (hasher ? hasher.apply(this, arguments) : key); + if (!_.has(cache, address)) cache[address] = func.apply(this, arguments); + return cache[address]; + }; + memoize.cache = {}; + return memoize; + }; + + // Delays a function for the given number of milliseconds, and then calls + // it with the arguments supplied. + _.delay = function(func, wait) { + var args = slice.call(arguments, 2); + return setTimeout(function(){ + return func.apply(null, args); + }, wait); + }; + + // Defers a function, scheduling it to run after the current call stack has + // cleared. + _.defer = _.partial(_.delay, _, 1); + + // Returns a function, that, when invoked, will only be triggered at most once + // during a given window of time. 
Normally, the throttled function will run + // as much as it can, without ever going more than once per `wait` duration; + // but if you'd like to disable the execution on the leading edge, pass + // `{leading: false}`. To disable execution on the trailing edge, ditto. + _.throttle = function(func, wait, options) { + var context, args, result; + var timeout = null; + var previous = 0; + if (!options) options = {}; + var later = function() { + previous = options.leading === false ? 0 : _.now(); + timeout = null; + result = func.apply(context, args); + if (!timeout) context = args = null; + }; + return function() { + var now = _.now(); + if (!previous && options.leading === false) previous = now; + var remaining = wait - (now - previous); + context = this; + args = arguments; + if (remaining <= 0 || remaining > wait) { + if (timeout) { + clearTimeout(timeout); + timeout = null; + } + previous = now; + result = func.apply(context, args); + if (!timeout) context = args = null; + } else if (!timeout && options.trailing !== false) { + timeout = setTimeout(later, remaining); + } + return result; + }; + }; + + // Returns a function, that, as long as it continues to be invoked, will not + // be triggered. The function will be called after it stops being called for + // N milliseconds. If `immediate` is passed, trigger the function on the + // leading edge, instead of the trailing. + _.debounce = function(func, wait, immediate) { + var timeout, args, context, timestamp, result; + + var later = function() { + var last = _.now() - timestamp; + + if (last < wait && last >= 0) { + timeout = setTimeout(later, wait - last); + } else { + timeout = null; + if (!immediate) { + result = func.apply(context, args); + if (!timeout) context = args = null; + } + } + }; + + return function() { + context = this; + args = arguments; + timestamp = _.now(); + var callNow = immediate && !timeout; + if (!timeout) timeout = setTimeout(later, wait); + if (callNow) { + result = func.apply(context, args); + context = args = null; + } + + return result; + }; + }; + + // Returns the first function passed as an argument to the second, + // allowing you to adjust arguments, run code before and after, and + // conditionally execute the original function. + _.wrap = function(func, wrapper) { + return _.partial(wrapper, func); + }; + + // Returns a negated version of the passed-in predicate. + _.negate = function(predicate) { + return function() { + return !predicate.apply(this, arguments); + }; + }; + + // Returns a function that is the composition of a list of functions, each + // consuming the return value of the function that follows. + _.compose = function() { + var args = arguments; + var start = args.length - 1; + return function() { + var i = start; + var result = args[start].apply(this, arguments); + while (i--) result = args[i].call(this, result); + return result; + }; + }; + + // Returns a function that will only be executed on and after the Nth call. + _.after = function(times, func) { + return function() { + if (--times < 1) { + return func.apply(this, arguments); + } + }; + }; + + // Returns a function that will only be executed up to (but not including) the Nth call. + _.before = function(times, func) { + var memo; + return function() { + if (--times > 0) { + memo = func.apply(this, arguments); + } + if (times <= 1) func = null; + return memo; + }; + }; + + // Returns a function that will be executed at most one time, no matter how + // often you call it. Useful for lazy initialization. 
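+  // [Editor's sketch, not part of upstream Underscore]
+  // e.g. var boot = _.once(initialize); boot(); boot();
+  // `initialize` runs only on the first call; later calls return the
+  // memoized first result. It is built from _.before(2, fn) via _.partial.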
+ _.once = _.partial(_.before, 2); + + // Object Functions + // ---------------- + + // Keys in IE < 9 that won't be iterated by `for key in ...` and thus missed. + var hasEnumBug = !{toString: null}.propertyIsEnumerable('toString'); + var nonEnumerableProps = ['valueOf', 'isPrototypeOf', 'toString', + 'propertyIsEnumerable', 'hasOwnProperty', 'toLocaleString']; + + function collectNonEnumProps(obj, keys) { + var nonEnumIdx = nonEnumerableProps.length; + var constructor = obj.constructor; + var proto = (_.isFunction(constructor) && constructor.prototype) || ObjProto; + + // Constructor is a special case. + var prop = 'constructor'; + if (_.has(obj, prop) && !_.contains(keys, prop)) keys.push(prop); + + while (nonEnumIdx--) { + prop = nonEnumerableProps[nonEnumIdx]; + if (prop in obj && obj[prop] !== proto[prop] && !_.contains(keys, prop)) { + keys.push(prop); + } + } + } + + // Retrieve the names of an object's own properties. + // Delegates to **ECMAScript 5**'s native `Object.keys` + _.keys = function(obj) { + if (!_.isObject(obj)) return []; + if (nativeKeys) return nativeKeys(obj); + var keys = []; + for (var key in obj) if (_.has(obj, key)) keys.push(key); + // Ahem, IE < 9. + if (hasEnumBug) collectNonEnumProps(obj, keys); + return keys; + }; + + // Retrieve all the property names of an object. + _.allKeys = function(obj) { + if (!_.isObject(obj)) return []; + var keys = []; + for (var key in obj) keys.push(key); + // Ahem, IE < 9. + if (hasEnumBug) collectNonEnumProps(obj, keys); + return keys; + }; + + // Retrieve the values of an object's properties. + _.values = function(obj) { + var keys = _.keys(obj); + var length = keys.length; + var values = Array(length); + for (var i = 0; i < length; i++) { + values[i] = obj[keys[i]]; + } + return values; + }; + + // Returns the results of applying the iteratee to each element of the object + // In contrast to _.map it returns an object + _.mapObject = function(obj, iteratee, context) { + iteratee = cb(iteratee, context); + var keys = _.keys(obj), + length = keys.length, + results = {}, + currentKey; + for (var index = 0; index < length; index++) { + currentKey = keys[index]; + results[currentKey] = iteratee(obj[currentKey], currentKey, obj); + } + return results; + }; + + // Convert an object into a list of `[key, value]` pairs. + _.pairs = function(obj) { + var keys = _.keys(obj); + var length = keys.length; + var pairs = Array(length); + for (var i = 0; i < length; i++) { + pairs[i] = [keys[i], obj[keys[i]]]; + } + return pairs; + }; + + // Invert the keys and values of an object. The values must be serializable. + _.invert = function(obj) { + var result = {}; + var keys = _.keys(obj); + for (var i = 0, length = keys.length; i < length; i++) { + result[obj[keys[i]]] = keys[i]; + } + return result; + }; + + // Return a sorted list of the function names available on the object. + // Aliased as `methods` + _.functions = _.methods = function(obj) { + var names = []; + for (var key in obj) { + if (_.isFunction(obj[key])) names.push(key); + } + return names.sort(); + }; + + // Extend a given object with all the properties in passed-in object(s). 
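+ // e.g. (editor's illustration) _.extend({name: 'moe'}, {age: 50})
+ // returns {name: 'moe', age: 50}; later sources win, so
+ // _.extend({a: 1}, {a: 2}) returns {a: 2}.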
+ _.extend = createAssigner(_.allKeys); + + // Assigns a given object with all the own properties in the passed-in object(s) + // (https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object/assign) + _.extendOwn = _.assign = createAssigner(_.keys); + + // Returns the first key on an object that passes a predicate test + _.findKey = function(obj, predicate, context) { + predicate = cb(predicate, context); + var keys = _.keys(obj), key; + for (var i = 0, length = keys.length; i < length; i++) { + key = keys[i]; + if (predicate(obj[key], key, obj)) return key; + } + }; + + // Return a copy of the object only containing the whitelisted properties. + _.pick = function(object, oiteratee, context) { + var result = {}, obj = object, iteratee, keys; + if (obj == null) return result; + if (_.isFunction(oiteratee)) { + keys = _.allKeys(obj); + iteratee = optimizeCb(oiteratee, context); + } else { + keys = flatten(arguments, false, false, 1); + iteratee = function(value, key, obj) { return key in obj; }; + obj = Object(obj); + } + for (var i = 0, length = keys.length; i < length; i++) { + var key = keys[i]; + var value = obj[key]; + if (iteratee(value, key, obj)) result[key] = value; + } + return result; + }; + + // Return a copy of the object without the blacklisted properties. + _.omit = function(obj, iteratee, context) { + if (_.isFunction(iteratee)) { + iteratee = _.negate(iteratee); + } else { + var keys = _.map(flatten(arguments, false, false, 1), String); + iteratee = function(value, key) { + return !_.contains(keys, key); + }; + } + return _.pick(obj, iteratee, context); + }; + + // Fill in a given object with default properties. + _.defaults = createAssigner(_.allKeys, true); + + // Creates an object that inherits from the given prototype object. + // If additional properties are provided then they will be added to the + // created object. + _.create = function(prototype, props) { + var result = baseCreate(prototype); + if (props) _.extendOwn(result, props); + return result; + }; + + // Create a (shallow-cloned) duplicate of an object. + _.clone = function(obj) { + if (!_.isObject(obj)) return obj; + return _.isArray(obj) ? obj.slice() : _.extend({}, obj); + }; + + // Invokes interceptor with the obj, and then returns obj. + // The primary purpose of this method is to "tap into" a method chain, in + // order to perform operations on intermediate results within the chain. + _.tap = function(obj, interceptor) { + interceptor(obj); + return obj; + }; + + // Returns whether an object has a given set of `key:value` pairs. + _.isMatch = function(object, attrs) { + var keys = _.keys(attrs), length = keys.length; + if (object == null) return !length; + var obj = Object(object); + for (var i = 0; i < length; i++) { + var key = keys[i]; + if (attrs[key] !== obj[key] || !(key in obj)) return false; + } + return true; + }; + + + // Internal recursive comparison function for `isEqual`. + var eq = function(a, b, aStack, bStack) { + // Identical objects are equal. `0 === -0`, but they aren't identical. + // See the [Harmony `egal` proposal](http://wiki.ecmascript.org/doku.php?id=harmony:egal). + if (a === b) return a !== 0 || 1 / a === 1 / b; + // A strict comparison is necessary because `null == undefined`. + if (a == null || b == null) return a === b; + // Unwrap any wrapped objects. + if (a instanceof _) a = a._wrapped; + if (b instanceof _) b = b._wrapped; + // Compare `[[Class]]` names. 
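+ // e.g. toString.call([1, 2]) gives '[object Array]' while
+ // toString.call(new Date) gives '[object Date]' (editor's note), so two
+ // values whose class names differ can never compare equal below.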
+ var className = toString.call(a); + if (className !== toString.call(b)) return false; + switch (className) { + // Strings, numbers, regular expressions, dates, and booleans are compared by value. + case '[object RegExp]': + // RegExps are coerced to strings for comparison (Note: '' + /a/i === '/a/i') + case '[object String]': + // Primitives and their corresponding object wrappers are equivalent; thus, `"5"` is + // equivalent to `new String("5")`. + return '' + a === '' + b; + case '[object Number]': + // `NaN`s are equivalent, but non-reflexive. + // Object(NaN) is equivalent to NaN + if (+a !== +a) return +b !== +b; + // An `egal` comparison is performed for other numeric values. + return +a === 0 ? 1 / +a === 1 / b : +a === +b; + case '[object Date]': + case '[object Boolean]': + // Coerce dates and booleans to numeric primitive values. Dates are compared by their + // millisecond representations. Note that invalid dates with millisecond representations + // of `NaN` are not equivalent. + return +a === +b; + } + + var areArrays = className === '[object Array]'; + if (!areArrays) { + if (typeof a != 'object' || typeof b != 'object') return false; + + // Objects with different constructors are not equivalent, but `Object`s or `Array`s + // from different frames are. + var aCtor = a.constructor, bCtor = b.constructor; + if (aCtor !== bCtor && !(_.isFunction(aCtor) && aCtor instanceof aCtor && + _.isFunction(bCtor) && bCtor instanceof bCtor) + && ('constructor' in a && 'constructor' in b)) { + return false; + } + } + // Assume equality for cyclic structures. The algorithm for detecting cyclic + // structures is adapted from ES 5.1 section 15.12.3, abstract operation `JO`. + + // Initializing stack of traversed objects. + // It's done here since we only need them for objects and arrays comparison. + aStack = aStack || []; + bStack = bStack || []; + var length = aStack.length; + while (length--) { + // Linear search. Performance is inversely proportional to the number of + // unique nested structures. + if (aStack[length] === a) return bStack[length] === b; + } + + // Add the first object to the stack of traversed objects. + aStack.push(a); + bStack.push(b); + + // Recursively compare objects and arrays. + if (areArrays) { + // Compare array lengths to determine if a deep comparison is necessary. + length = a.length; + if (length !== b.length) return false; + // Deep compare the contents, ignoring non-numeric properties. + while (length--) { + if (!eq(a[length], b[length], aStack, bStack)) return false; + } + } else { + // Deep compare objects. + var keys = _.keys(a), key; + length = keys.length; + // Ensure that both objects contain the same number of properties before comparing deep equality. + if (_.keys(b).length !== length) return false; + while (length--) { + // Deep compare each member + key = keys[length]; + if (!(_.has(b, key) && eq(a[key], b[key], aStack, bStack))) return false; + } + } + // Remove the first object from the stack of traversed objects. + aStack.pop(); + bStack.pop(); + return true; + }; + + // Perform a deep comparison to check if two objects are equal. + _.isEqual = function(a, b) { + return eq(a, b); + }; + + // Is a given array, string, or object empty? + // An "empty" object has no enumerable own-properties. + _.isEmpty = function(obj) { + if (obj == null) return true; + if (isArrayLike(obj) && (_.isArray(obj) || _.isString(obj) || _.isArguments(obj))) return obj.length === 0; + return _.keys(obj).length === 0; + }; + + // Is a given value a DOM element? 
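+ // e.g. _.isElement(document.body) is true, _.isElement('<div>') is false:
+ // only actual DOM nodes with nodeType 1 qualify (editor's illustration).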
+ _.isElement = function(obj) { + return !!(obj && obj.nodeType === 1); + }; + + // Is a given value an array? + // Delegates to ECMA5's native Array.isArray + _.isArray = nativeIsArray || function(obj) { + return toString.call(obj) === '[object Array]'; + }; + + // Is a given variable an object? + _.isObject = function(obj) { + var type = typeof obj; + return type === 'function' || type === 'object' && !!obj; + }; + + // Add some isType methods: isArguments, isFunction, isString, isNumber, isDate, isRegExp, isError. + _.each(['Arguments', 'Function', 'String', 'Number', 'Date', 'RegExp', 'Error'], function(name) { + _['is' + name] = function(obj) { + return toString.call(obj) === '[object ' + name + ']'; + }; + }); + + // Define a fallback version of the method in browsers (ahem, IE < 9), where + // there isn't any inspectable "Arguments" type. + if (!_.isArguments(arguments)) { + _.isArguments = function(obj) { + return _.has(obj, 'callee'); + }; + } + + // Optimize `isFunction` if appropriate. Work around some typeof bugs in old v8, + // IE 11 (#1621), and in Safari 8 (#1929). + if (typeof /./ != 'function' && typeof Int8Array != 'object') { + _.isFunction = function(obj) { + return typeof obj == 'function' || false; + }; + } + + // Is a given object a finite number? + _.isFinite = function(obj) { + return isFinite(obj) && !isNaN(parseFloat(obj)); + }; + + // Is the given value `NaN`? (NaN is the only number which does not equal itself). + _.isNaN = function(obj) { + return _.isNumber(obj) && obj !== +obj; + }; + + // Is a given value a boolean? + _.isBoolean = function(obj) { + return obj === true || obj === false || toString.call(obj) === '[object Boolean]'; + }; + + // Is a given value equal to null? + _.isNull = function(obj) { + return obj === null; + }; + + // Is a given variable undefined? + _.isUndefined = function(obj) { + return obj === void 0; + }; + + // Shortcut function for checking if an object has a given property directly + // on itself (in other words, not on a prototype). + _.has = function(obj, key) { + return obj != null && hasOwnProperty.call(obj, key); + }; + + // Utility Functions + // ----------------- + + // Run Underscore.js in *noConflict* mode, returning the `_` variable to its + // previous owner. Returns a reference to the Underscore object. + _.noConflict = function() { + root._ = previousUnderscore; + return this; + }; + + // Keep the identity function around for default iteratees. + _.identity = function(value) { + return value; + }; + + // Predicate-generating functions. Often useful outside of Underscore. + _.constant = function(value) { + return function() { + return value; + }; + }; + + _.noop = function(){}; + + _.property = property; + + // Generates a function for a given object that returns a given property. + _.propertyOf = function(obj) { + return obj == null ? function(){} : function(key) { + return obj[key]; + }; + }; + + // Returns a predicate for checking whether an object has a given set of + // `key:value` pairs. + _.matcher = _.matches = function(attrs) { + attrs = _.extendOwn({}, attrs); + return function(obj) { + return _.isMatch(obj, attrs); + }; + }; + + // Run a function **n** times. + _.times = function(n, iteratee, context) { + var accum = Array(Math.max(0, n)); + iteratee = optimizeCb(iteratee, context, 1); + for (var i = 0; i < n; i++) accum[i] = iteratee(i); + return accum; + }; + + // Return a random integer between min and max (inclusive). 
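+ // e.g. _.random(10) yields an integer in [0, 10], and _.random(5, 7)
+ // yields 5, 6 or 7 (editor's illustration of the min/max handling below).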
+ _.random = function(min, max) {
+ if (max == null) {
+ max = min;
+ min = 0;
+ }
+ return min + Math.floor(Math.random() * (max - min + 1));
+ };
+
+ // A (possibly faster) way to get the current timestamp as an integer.
+ _.now = Date.now || function() {
+ return new Date().getTime();
+ };
+
+ // List of HTML entities for escaping.
+ var escapeMap = {
+ '&': '&amp;',
+ '<': '&lt;',
+ '>': '&gt;',
+ '"': '&quot;',
+ "'": '&#x27;',
+ '`': '&#x60;'
+ };
+ var unescapeMap = _.invert(escapeMap);
+
+ // Functions for escaping and unescaping strings to/from HTML interpolation.
+ var createEscaper = function(map) {
+ var escaper = function(match) {
+ return map[match];
+ };
+ // Regexes for identifying a key that needs to be escaped
+ var source = '(?:' + _.keys(map).join('|') + ')';
+ var testRegexp = RegExp(source);
+ var replaceRegexp = RegExp(source, 'g');
+ return function(string) {
+ string = string == null ? '' : '' + string;
+ return testRegexp.test(string) ? string.replace(replaceRegexp, escaper) : string;
+ };
+ };
+ _.escape = createEscaper(escapeMap);
+ _.unescape = createEscaper(unescapeMap);
+
+ // If the value of the named `property` is a function then invoke it with the
+ // `object` as context; otherwise, return it.
+ _.result = function(object, property, fallback) {
+ var value = object == null ? void 0 : object[property];
+ if (value === void 0) {
+ value = fallback;
+ }
+ return _.isFunction(value) ? value.call(object) : value;
+ };
+
+ // Generate a unique integer id (unique within the entire client session).
+ // Useful for temporary DOM ids.
+ var idCounter = 0;
+ _.uniqueId = function(prefix) {
+ var id = ++idCounter + '';
+ return prefix ? prefix + id : id;
+ };
+
+ // By default, Underscore uses ERB-style template delimiters; change the
+ // following template settings to use alternative delimiters.
+ _.templateSettings = {
+ evaluate : /<%([\s\S]+?)%>/g,
+ interpolate : /<%=([\s\S]+?)%>/g,
+ escape : /<%-([\s\S]+?)%>/g
+ };
+
+ // When customizing `templateSettings`, if you don't want to define an
+ // interpolation, evaluation or escaping regex, we need one that is
+ // guaranteed not to match.
+ var noMatch = /(.)^/;
+
+ // Certain characters need to be escaped so that they can be put into a
+ // string literal.
+ var escapes = {
+ "'": "'",
+ '\\': '\\',
+ '\r': 'r',
+ '\n': 'n',
+ '\u2028': 'u2028',
+ '\u2029': 'u2029'
+ };
+
+ var escaper = /\\|'|\r|\n|\u2028|\u2029/g;
+
+ var escapeChar = function(match) {
+ return '\\' + escapes[match];
+ };
+
+ // JavaScript micro-templating, similar to John Resig's implementation.
+ // Underscore templating handles arbitrary delimiters, preserves whitespace,
+ // and correctly escapes quotes within interpolated code.
+ // NB: `oldSettings` only exists for backwards compatibility.
+ _.template = function(text, settings, oldSettings) {
+ if (!settings && oldSettings) settings = oldSettings;
+ settings = _.defaults({}, settings, _.templateSettings);
+
+ // Combine delimiters into one regular expression via alternation.
+ var matcher = RegExp([
+ (settings.escape || noMatch).source,
+ (settings.interpolate || noMatch).source,
+ (settings.evaluate || noMatch).source
+ ].join('|') + '|$', 'g');
+
+ // Compile the template source, escaping string literals appropriately.
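+ // For instance (editor's sketch), compiling "hi <%= name %>" builds source
+ // roughly equivalent to:
+ //   var __t,__p='',__j=Array.prototype.join,
+ //       print=function(){__p+=__j.call(arguments,'');};
+ //   with(obj||{}){ __p+='hi '+((__t=( name ))==null?'':__t)+''; }
+ //   return __p;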
+ var index = 0;
+ var source = "__p+='";
+ text.replace(matcher, function(match, escape, interpolate, evaluate, offset) {
+ source += text.slice(index, offset).replace(escaper, escapeChar);
+ index = offset + match.length;
+
+ if (escape) {
+ source += "'+\n((__t=(" + escape + "))==null?'':_.escape(__t))+\n'";
+ } else if (interpolate) {
+ source += "'+\n((__t=(" + interpolate + "))==null?'':__t)+\n'";
+ } else if (evaluate) {
+ source += "';\n" + evaluate + "\n__p+='";
+ }
+
+ // Adobe VMs need the match returned to produce the correct offset.
+ return match;
+ });
+ source += "';\n";
+
+ // If a variable is not specified, place data values in local scope.
+ if (!settings.variable) source = 'with(obj||{}){\n' + source + '}\n';
+
+ source = "var __t,__p='',__j=Array.prototype.join," +
+ "print=function(){__p+=__j.call(arguments,'');};\n" +
+ source + 'return __p;\n';
+
+ try {
+ var render = new Function(settings.variable || 'obj', '_', source);
+ } catch (e) {
+ e.source = source;
+ throw e;
+ }
+
+ var template = function(data) {
+ return render.call(this, data, _);
+ };
+
+ // Provide the compiled source as a convenience for precompilation.
+ var argument = settings.variable || 'obj';
+ template.source = 'function(' + argument + '){\n' + source + '}';
+
+ return template;
+ };
+
+ // Add a "chain" function. Start chaining a wrapped Underscore object.
+ _.chain = function(obj) {
+ var instance = _(obj);
+ instance._chain = true;
+ return instance;
+ };
+
+ // OOP
+ // ---------------
+ // If Underscore is called as a function, it returns a wrapped object that
+ // can be used OO-style. This wrapper holds altered versions of all the
+ // underscore functions. Wrapped objects may be chained.
+
+ // Helper function to continue chaining intermediate results.
+ var result = function(instance, obj) {
+ return instance._chain ? _(obj).chain() : obj;
+ };
+
+ // Add your own custom functions to the Underscore object.
+ _.mixin = function(obj) {
+ _.each(_.functions(obj), function(name) {
+ var func = _[name] = obj[name];
+ _.prototype[name] = function() {
+ var args = [this._wrapped];
+ push.apply(args, arguments);
+ return result(this, func.apply(_, args));
+ };
+ });
+ };
+
+ // Add all of the Underscore functions to the wrapper object.
+ _.mixin(_);
+
+ // Add all mutator Array functions to the wrapper.
+ _.each(['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], function(name) {
+ var method = ArrayProto[name];
+ _.prototype[name] = function() {
+ var obj = this._wrapped;
+ method.apply(obj, arguments);
+ if ((name === 'shift' || name === 'splice') && obj.length === 0) delete obj[0];
+ return result(this, obj);
+ };
+ });
+
+ // Add all accessor Array functions to the wrapper.
+ _.each(['concat', 'join', 'slice'], function(name) {
+ var method = ArrayProto[name];
+ _.prototype[name] = function() {
+ return result(this, method.apply(this._wrapped, arguments));
+ };
+ });
+
+ // Extracts the result from a wrapped and chained object.
+ _.prototype.value = function() {
+ return this._wrapped;
+ };
+
+ // Provide unwrapping proxy for some methods used in engine operations
+ // such as arithmetic and JSON stringification.
+ _.prototype.valueOf = _.prototype.toJSON = _.prototype.value;
+
+ _.prototype.toString = function() {
+ return '' + this._wrapped;
+ };
+
+ // AMD registration happens at the end for compatibility with AMD loaders
+ // that may not enforce next-turn semantics on modules.
Even though general + // practice for AMD registration is to be anonymous, underscore registers + // as a named module because, like jQuery, it is a base library that is + // popular enough to be bundled in a third party lib, but not be part of + // an AMD load request. Those cases could generate an error when an + // anonymous define() is called outside of a loader request. + if (typeof define === 'function' && define.amd) { + define('underscore', [], function() { + return _; + }); + } +}.call(this)); + +},{}],26:[function(require,module,exports){ +arguments[4][19][0].apply(exports,arguments) +},{"dup":19}],27:[function(require,module,exports){ +module.exports = function isBuffer(arg) { + return arg && typeof arg === 'object' + && typeof arg.copy === 'function' + && typeof arg.fill === 'function' + && typeof arg.readUInt8 === 'function'; +} +},{}],28:[function(require,module,exports){ +(function (process,global){ +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +var formatRegExp = /%[sdj%]/g; +exports.format = function(f) { + if (!isString(f)) { + var objects = []; + for (var i = 0; i < arguments.length; i++) { + objects.push(inspect(arguments[i])); + } + return objects.join(' '); + } + + var i = 1; + var args = arguments; + var len = args.length; + var str = String(f).replace(formatRegExp, function(x) { + if (x === '%%') return '%'; + if (i >= len) return x; + switch (x) { + case '%s': return String(args[i++]); + case '%d': return Number(args[i++]); + case '%j': + try { + return JSON.stringify(args[i++]); + } catch (_) { + return '[Circular]'; + } + default: + return x; + } + }); + for (var x = args[i]; i < len; x = args[++i]) { + if (isNull(x) || !isObject(x)) { + str += ' ' + x; + } else { + str += ' ' + inspect(x); + } + } + return str; +}; + + +// Mark that a method should not be used. +// Returns a modified function which warns once by default. +// If --no-deprecation is set, then it is a no-op. +exports.deprecate = function(fn, msg) { + // Allow for deprecating things in the process of starting up. 
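+ // Illustrative usage (editor's sketch; `oldFn` is a hypothetical function):
+ //   exports.oldFn = exports.deprecate(oldFn, 'oldFn() is deprecated');
+ // The wrapper warns once on first call, then keeps forwarding to `oldFn`.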
+ if (isUndefined(global.process)) {
+ return function() {
+ return exports.deprecate(fn, msg).apply(this, arguments);
+ };
+ }
+
+ if (process.noDeprecation === true) {
+ return fn;
+ }
+
+ var warned = false;
+ function deprecated() {
+ if (!warned) {
+ if (process.throwDeprecation) {
+ throw new Error(msg);
+ } else if (process.traceDeprecation) {
+ console.trace(msg);
+ } else {
+ console.error(msg);
+ }
+ warned = true;
+ }
+ return fn.apply(this, arguments);
+ }
+
+ return deprecated;
+};
+
+
+var debugs = {};
+var debugEnviron;
+exports.debuglog = function(set) {
+ if (isUndefined(debugEnviron))
+ debugEnviron = process.env.NODE_DEBUG || '';
+ set = set.toUpperCase();
+ if (!debugs[set]) {
+ if (new RegExp('\\b' + set + '\\b', 'i').test(debugEnviron)) {
+ var pid = process.pid;
+ debugs[set] = function() {
+ var msg = exports.format.apply(exports, arguments);
+ console.error('%s %d: %s', set, pid, msg);
+ };
+ } else {
+ debugs[set] = function() {};
+ }
+ }
+ return debugs[set];
+};
+
+
+/**
+ * Echoes the value of a value. Tries to print the value out
+ * in the best way possible given the different types.
+ *
+ * @param {Object} obj The object to print out.
+ * @param {Object} opts Optional options object that alters the output.
+ */
+/* legacy: obj, showHidden, depth, colors*/
+function inspect(obj, opts) {
+ // default options
+ var ctx = {
+ seen: [],
+ stylize: stylizeNoColor
+ };
+ // legacy...
+ if (arguments.length >= 3) ctx.depth = arguments[2];
+ if (arguments.length >= 4) ctx.colors = arguments[3];
+ if (isBoolean(opts)) {
+ // legacy...
+ ctx.showHidden = opts;
+ } else if (opts) {
+ // got an "options" object
+ exports._extend(ctx, opts);
+ }
+ // set default options
+ if (isUndefined(ctx.showHidden)) ctx.showHidden = false;
+ if (isUndefined(ctx.depth)) ctx.depth = 2;
+ if (isUndefined(ctx.colors)) ctx.colors = false;
+ if (isUndefined(ctx.customInspect)) ctx.customInspect = true;
+ if (ctx.colors) ctx.stylize = stylizeWithColor;
+ return formatValue(ctx, obj, ctx.depth);
+}
+exports.inspect = inspect;
+
+
+// http://en.wikipedia.org/wiki/ANSI_escape_code#graphics
+inspect.colors = {
+ 'bold' : [1, 22],
+ 'italic' : [3, 23],
+ 'underline' : [4, 24],
+ 'inverse' : [7, 27],
+ 'white' : [37, 39],
+ 'grey' : [90, 39],
+ 'black' : [30, 39],
+ 'blue' : [34, 39],
+ 'cyan' : [36, 39],
+ 'green' : [32, 39],
+ 'magenta' : [35, 39],
+ 'red' : [31, 39],
+ 'yellow' : [33, 39]
+};
+
+// Don't use 'blue': not visible on cmd.exe
+inspect.styles = {
+ 'special': 'cyan',
+ 'number': 'yellow',
+ 'boolean': 'yellow',
+ 'undefined': 'grey',
+ 'null': 'bold',
+ 'string': 'green',
+ 'date': 'magenta',
+ // "name": intentionally not styling
+ 'regexp': 'red'
+};
+
+
+function stylizeWithColor(str, styleType) {
+ var style = inspect.styles[styleType];
+
+ if (style) {
+ return '\u001b[' + inspect.colors[style][0] + 'm' + str +
+ '\u001b[' + inspect.colors[style][1] + 'm';
+ } else {
+ return str;
+ }
+}
+
+
+function stylizeNoColor(str, styleType) {
+ return str;
+}
+
+
+function arrayToHash(array) {
+ var hash = {};
+
+ array.forEach(function(val, idx) {
+ hash[val] = true;
+ });
+
+ return hash;
+}
+
+
+function formatValue(ctx, value, recurseTimes) {
+ // Provide a hook for user-specified inspect functions.
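+ // For example (editor's illustration), a value that defines
+ //   obj.inspect = function() { return 'my custom view'; };
+ // is rendered by util.inspect as "my custom view" instead of going
+ // through the generic formatting below.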
+ // Check that value is an object with an inspect function on it
+ if (ctx.customInspect &&
+ value &&
+ isFunction(value.inspect) &&
+ // Filter out the util module, its inspect function is special
+ value.inspect !== exports.inspect &&
+ // Also filter out any prototype objects using the circular check.
+ !(value.constructor && value.constructor.prototype === value)) {
+ var ret = value.inspect(recurseTimes, ctx);
+ if (!isString(ret)) {
+ ret = formatValue(ctx, ret, recurseTimes);
+ }
+ return ret;
+ }
+
+ // Primitive types cannot have properties
+ var primitive = formatPrimitive(ctx, value);
+ if (primitive) {
+ return primitive;
+ }
+
+ // Look up the keys of the object.
+ var keys = Object.keys(value);
+ var visibleKeys = arrayToHash(keys);
+
+ if (ctx.showHidden) {
+ keys = Object.getOwnPropertyNames(value);
+ }
+
+ // IE doesn't make error fields non-enumerable
+ // http://msdn.microsoft.com/en-us/library/ie/dww52sbt(v=vs.94).aspx
+ if (isError(value)
+ && (keys.indexOf('message') >= 0 || keys.indexOf('description') >= 0)) {
+ return formatError(value);
+ }
+
+ // Some types of objects without properties can be short-circuited.
+ if (keys.length === 0) {
+ if (isFunction(value)) {
+ var name = value.name ? ': ' + value.name : '';
+ return ctx.stylize('[Function' + name + ']', 'special');
+ }
+ if (isRegExp(value)) {
+ return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp');
+ }
+ if (isDate(value)) {
+ return ctx.stylize(Date.prototype.toString.call(value), 'date');
+ }
+ if (isError(value)) {
+ return formatError(value);
+ }
+ }
+
+ var base = '', array = false, braces = ['{', '}'];
+
+ // Make arrays say that they are arrays
+ if (isArray(value)) {
+ array = true;
+ braces = ['[', ']'];
+ }
+
+ // Make functions say that they are functions
+ if (isFunction(value)) {
+ var n = value.name ? ': ' + value.name : '';
+ base = ' [Function' + n + ']';
+ }
+
+ // Make RegExps say that they are RegExps
+ if (isRegExp(value)) {
+ base = ' ' + RegExp.prototype.toString.call(value);
+ }
+
+ // Make dates with properties first say the date
+ if (isDate(value)) {
+ base = ' ' + Date.prototype.toUTCString.call(value);
+ }
+
+ // Make errors with a message first say the error
+ if (isError(value)) {
+ base = ' ' + formatError(value);
+ }
+
+ if (keys.length === 0 && (!array || value.length == 0)) {
+ return braces[0] + base + braces[1];
+ }
+
+ if (recurseTimes < 0) {
+ if (isRegExp(value)) {
+ return ctx.stylize(RegExp.prototype.toString.call(value), 'regexp');
+ } else {
+ return ctx.stylize('[Object]', 'special');
+ }
+ }
+
+ ctx.seen.push(value);
+
+ var output;
+ if (array) {
+ output = formatArray(ctx, value, recurseTimes, visibleKeys, keys);
+ } else {
+ output = keys.map(function(key) {
+ return formatProperty(ctx, value, recurseTimes, visibleKeys, key, array);
+ });
+ }
+
+ ctx.seen.pop();
+
+ return reduceToSingleString(output, base, braces);
+}
+
+
+function formatPrimitive(ctx, value) {
+ if (isUndefined(value))
+ return ctx.stylize('undefined', 'undefined');
+ if (isString(value)) {
+ var simple = '\'' + JSON.stringify(value).replace(/^"|"$/g, '')
+ .replace(/'/g, "\\'")
+ .replace(/\\"/g, '"') + '\'';
+ return ctx.stylize(simple, 'string');
+ }
+ if (isNumber(value))
+ return ctx.stylize('' + value, 'number');
+ if (isBoolean(value))
+ return ctx.stylize('' + value, 'boolean');
+ // For some reason typeof null is "object", so special case here.
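+ // (e.g. typeof null === 'object', yet null must print as the literal
+ // 'null' rather than fall through to the object formatter; editor's note.)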
+ if (isNull(value)) + return ctx.stylize('null', 'null'); +} + + +function formatError(value) { + return '[' + Error.prototype.toString.call(value) + ']'; +} + + +function formatArray(ctx, value, recurseTimes, visibleKeys, keys) { + var output = []; + for (var i = 0, l = value.length; i < l; ++i) { + if (hasOwnProperty(value, String(i))) { + output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, + String(i), true)); + } else { + output.push(''); + } + } + keys.forEach(function(key) { + if (!key.match(/^\d+$/)) { + output.push(formatProperty(ctx, value, recurseTimes, visibleKeys, + key, true)); + } + }); + return output; +} + + +function formatProperty(ctx, value, recurseTimes, visibleKeys, key, array) { + var name, str, desc; + desc = Object.getOwnPropertyDescriptor(value, key) || { value: value[key] }; + if (desc.get) { + if (desc.set) { + str = ctx.stylize('[Getter/Setter]', 'special'); + } else { + str = ctx.stylize('[Getter]', 'special'); + } + } else { + if (desc.set) { + str = ctx.stylize('[Setter]', 'special'); + } + } + if (!hasOwnProperty(visibleKeys, key)) { + name = '[' + key + ']'; + } + if (!str) { + if (ctx.seen.indexOf(desc.value) < 0) { + if (isNull(recurseTimes)) { + str = formatValue(ctx, desc.value, null); + } else { + str = formatValue(ctx, desc.value, recurseTimes - 1); + } + if (str.indexOf('\n') > -1) { + if (array) { + str = str.split('\n').map(function(line) { + return ' ' + line; + }).join('\n').substr(2); + } else { + str = '\n' + str.split('\n').map(function(line) { + return ' ' + line; + }).join('\n'); + } + } + } else { + str = ctx.stylize('[Circular]', 'special'); + } + } + if (isUndefined(name)) { + if (array && key.match(/^\d+$/)) { + return str; + } + name = JSON.stringify('' + key); + if (name.match(/^"([a-zA-Z_][a-zA-Z_0-9]*)"$/)) { + name = name.substr(1, name.length - 2); + name = ctx.stylize(name, 'name'); + } else { + name = name.replace(/'/g, "\\'") + .replace(/\\"/g, '"') + .replace(/(^"|"$)/g, "'"); + name = ctx.stylize(name, 'string'); + } + } + + return name + ': ' + str; +} + + +function reduceToSingleString(output, base, braces) { + var numLinesEst = 0; + var length = output.reduce(function(prev, cur) { + numLinesEst++; + if (cur.indexOf('\n') >= 0) numLinesEst++; + return prev + cur.replace(/\u001b\[\d\d?m/g, '').length + 1; + }, 0); + + if (length > 60) { + return braces[0] + + (base === '' ? '' : base + '\n ') + + ' ' + + output.join(',\n ') + + ' ' + + braces[1]; + } + + return braces[0] + base + ' ' + output.join(', ') + ' ' + braces[1]; +} + + +// NOTE: These type checking functions intentionally don't use `instanceof` +// because it is fragile and can be easily faked with `Object.create()`. 
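+// For instance (editor's illustration):
+//   var fake = Object.create(Array.prototype);
+//   fake instanceof Array;  // true, although fake is not a real array
+//   Array.isArray(fake);    // false, which is why isArray delegates to it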
+function isArray(ar) { + return Array.isArray(ar); +} +exports.isArray = isArray; + +function isBoolean(arg) { + return typeof arg === 'boolean'; +} +exports.isBoolean = isBoolean; + +function isNull(arg) { + return arg === null; +} +exports.isNull = isNull; + +function isNullOrUndefined(arg) { + return arg == null; +} +exports.isNullOrUndefined = isNullOrUndefined; + +function isNumber(arg) { + return typeof arg === 'number'; +} +exports.isNumber = isNumber; + +function isString(arg) { + return typeof arg === 'string'; +} +exports.isString = isString; + +function isSymbol(arg) { + return typeof arg === 'symbol'; +} +exports.isSymbol = isSymbol; + +function isUndefined(arg) { + return arg === void 0; +} +exports.isUndefined = isUndefined; + +function isRegExp(re) { + return isObject(re) && objectToString(re) === '[object RegExp]'; +} +exports.isRegExp = isRegExp; + +function isObject(arg) { + return typeof arg === 'object' && arg !== null; +} +exports.isObject = isObject; + +function isDate(d) { + return isObject(d) && objectToString(d) === '[object Date]'; +} +exports.isDate = isDate; + +function isError(e) { + return isObject(e) && + (objectToString(e) === '[object Error]' || e instanceof Error); +} +exports.isError = isError; + +function isFunction(arg) { + return typeof arg === 'function'; +} +exports.isFunction = isFunction; + +function isPrimitive(arg) { + return arg === null || + typeof arg === 'boolean' || + typeof arg === 'number' || + typeof arg === 'string' || + typeof arg === 'symbol' || // ES6 symbol + typeof arg === 'undefined'; +} +exports.isPrimitive = isPrimitive; + +exports.isBuffer = require('./support/isBuffer'); + +function objectToString(o) { + return Object.prototype.toString.call(o); +} + + +function pad(n) { + return n < 10 ? '0' + n.toString(10) : n.toString(10); +} + + +var months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', + 'Oct', 'Nov', 'Dec']; + +// 26 Feb 16:19:34 +function timestamp() { + var d = new Date(); + var time = [pad(d.getHours()), + pad(d.getMinutes()), + pad(d.getSeconds())].join(':'); + return [d.getDate(), months[d.getMonth()], time].join(' '); +} + + +// log is just a thin wrapper to console.log that prepends a timestamp +exports.log = function() { + console.log('%s - %s', timestamp(), exports.format.apply(exports, arguments)); +}; + + +/** + * Inherit the prototype methods from one constructor into another. + * + * The Function.prototype.inherits from lang.js rewritten as a standalone + * function (not on Function.prototype). NOTE: If this file is to be loaded + * during bootstrapping this function needs to be rewritten using some native + * functions as prototype setup using normal JavaScript does not work as + * expected during bootstrapping (see mirror.js in r114903). + * + * @param {function} ctor Constructor function which needs to inherit the + * prototype. + * @param {function} superCtor Constructor function to inherit prototype from. + */ +exports.inherits = require('inherits'); + +exports._extend = function(origin, add) { + // Don't do anything if add isn't an object + if (!add || !isObject(add)) return origin; + + var keys = Object.keys(add); + var i = keys.length; + while (i--) { + origin[keys[i]] = add[keys[i]]; + } + return origin; +}; + +function hasOwnProperty(obj, prop) { + return Object.prototype.hasOwnProperty.call(obj, prop); +} + +}).call(this,require('_process'),typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? 
window : {}) +},{"./support/isBuffer":27,"_process":24,"inherits":26}],29:[function(require,module,exports){ +// Returns a wrapper function that returns a wrapped callback +// The wrapper function should do some stuff, and return a +// presumably different callback function. +// This makes sure that own properties are retained, so that +// decorations and such are not lost along the way. +module.exports = wrappy +function wrappy (fn, cb) { + if (fn && cb) return wrappy(fn)(cb) + + if (typeof fn !== 'function') + throw new TypeError('need wrapper function') + + Object.keys(fn).forEach(function (k) { + wrapper[k] = fn[k] + }) + + return wrapper + + function wrapper() { + var args = new Array(arguments.length) + for (var i = 0; i < args.length; i++) { + args[i] = arguments[i] + } + var ret = fn.apply(this, args) + var cb = args[args.length-1] + if (typeof ret === 'function' && ret !== cb) { + Object.keys(cb).forEach(function (k) { + ret[k] = cb[k] + }) + } + return ret + } +} + +},{}]},{},[7])(7) +}); \ No newline at end of file diff --git a/assets/javascripts/workers/search.1e90e0fb.min.js b/assets/javascripts/workers/search.1e90e0fb.min.js new file mode 100644 index 000000000..ff43aeddd --- /dev/null +++ b/assets/javascripts/workers/search.1e90e0fb.min.js @@ -0,0 +1,2 @@ +"use strict";(()=>{var xe=Object.create;var G=Object.defineProperty,ve=Object.defineProperties,Se=Object.getOwnPropertyDescriptor,Te=Object.getOwnPropertyDescriptors,Qe=Object.getOwnPropertyNames,Y=Object.getOwnPropertySymbols,Ee=Object.getPrototypeOf,X=Object.prototype.hasOwnProperty,be=Object.prototype.propertyIsEnumerable;var Z=Math.pow,J=(t,e,r)=>e in t?G(t,e,{enumerable:!0,configurable:!0,writable:!0,value:r}):t[e]=r,_=(t,e)=>{for(var r in e||(e={}))X.call(e,r)&&J(t,r,e[r]);if(Y)for(var r of Y(e))be.call(e,r)&&J(t,r,e[r]);return t},B=(t,e)=>ve(t,Te(e));var Le=(t,e)=>()=>(e||t((e={exports:{}}).exports,e),e.exports);var we=(t,e,r,n)=>{if(e&&typeof e=="object"||typeof e=="function")for(let i of Qe(e))!X.call(t,i)&&i!==r&&G(t,i,{get:()=>e[i],enumerable:!(n=Se(e,i))||n.enumerable});return t};var Pe=(t,e,r)=>(r=t!=null?xe(Ee(t)):{},we(e||!t||!t.__esModule?G(r,"default",{value:t,enumerable:!0}):r,t));var W=(t,e,r)=>new Promise((n,i)=>{var s=u=>{try{a(r.next(u))}catch(c){i(c)}},o=u=>{try{a(r.throw(u))}catch(c){i(c)}},a=u=>u.done?n(u.value):Promise.resolve(u.value).then(s,o);a((r=r.apply(t,e)).next())});var te=Le((K,ee)=>{(function(){var t=function(e){var r=new t.Builder;return r.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),r.searchPipeline.add(t.stemmer),e.call(r,r),r.build()};t.version="2.3.9";t.utils={},t.utils.warn=function(e){return function(r){e.console&&console.warn&&console.warn(r)}}(this),t.utils.asString=function(e){return e==null?"":e.toString()},t.utils.clone=function(e){if(e==null)return e;for(var r=Object.create(null),n=Object.keys(e),i=0;i0){var f=t.utils.clone(r)||{};f.position=[a,c],f.index=s.length,s.push(new t.Token(n.slice(a,o),f))}a=o+1}}return s},t.tokenizer.separator=/[\s\-]+/;t.Pipeline=function(){this._stack=[]},t.Pipeline.registeredFunctions=Object.create(null),t.Pipeline.registerFunction=function(e,r){r in this.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+r),e.label=r,t.Pipeline.registeredFunctions[e.label]=e},t.Pipeline.warnIfFunctionNotRegistered=function(e){var r=e.label&&e.label in this.registeredFunctions;r||t.utils.warn(`Function is not registered with pipeline. This may cause problems when serialising the index. 
+`,e)},t.Pipeline.load=function(e){var r=new t.Pipeline;return e.forEach(function(n){var i=t.Pipeline.registeredFunctions[n];if(i)r.add(i);else throw new Error("Cannot load unregistered function: "+n)}),r},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(r){t.Pipeline.warnIfFunctionNotRegistered(r),this._stack.push(r)},this)},t.Pipeline.prototype.after=function(e,r){t.Pipeline.warnIfFunctionNotRegistered(r);var n=this._stack.indexOf(e);if(n==-1)throw new Error("Cannot find existingFn");n=n+1,this._stack.splice(n,0,r)},t.Pipeline.prototype.before=function(e,r){t.Pipeline.warnIfFunctionNotRegistered(r);var n=this._stack.indexOf(e);if(n==-1)throw new Error("Cannot find existingFn");this._stack.splice(n,0,r)},t.Pipeline.prototype.remove=function(e){var r=this._stack.indexOf(e);r!=-1&&this._stack.splice(r,1)},t.Pipeline.prototype.run=function(e){for(var r=this._stack.length,n=0;n1&&(oe&&(n=s),o!=e);)i=n-r,s=r+Math.floor(i/2),o=this.elements[s*2];if(o==e||o>e)return s*2;if(ou?f+=2:a==u&&(r+=n[c+1]*i[f+1],c+=2,f+=2);return r},t.Vector.prototype.similarity=function(e){return this.dot(e)/this.magnitude()||0},t.Vector.prototype.toArray=function(){for(var e=new Array(this.elements.length/2),r=1,n=0;r0){var o=s.str.charAt(0),a;o in s.node.edges?a=s.node.edges[o]:(a=new t.TokenSet,s.node.edges[o]=a),s.str.length==1&&(a.final=!0),i.push({node:a,editsRemaining:s.editsRemaining,str:s.str.slice(1)})}if(s.editsRemaining!=0){if("*"in s.node.edges)var u=s.node.edges["*"];else{var u=new t.TokenSet;s.node.edges["*"]=u}if(s.str.length==0&&(u.final=!0),i.push({node:u,editsRemaining:s.editsRemaining-1,str:s.str}),s.str.length>1&&i.push({node:s.node,editsRemaining:s.editsRemaining-1,str:s.str.slice(1)}),s.str.length==1&&(s.node.final=!0),s.str.length>=1){if("*"in s.node.edges)var c=s.node.edges["*"];else{var c=new t.TokenSet;s.node.edges["*"]=c}s.str.length==1&&(c.final=!0),i.push({node:c,editsRemaining:s.editsRemaining-1,str:s.str.slice(1)})}if(s.str.length>1){var f=s.str.charAt(0),g=s.str.charAt(1),l;g in s.node.edges?l=s.node.edges[g]:(l=new t.TokenSet,s.node.edges[g]=l),s.str.length==1&&(l.final=!0),i.push({node:l,editsRemaining:s.editsRemaining-1,str:f+s.str.slice(2)})}}}return n},t.TokenSet.fromString=function(e){for(var r=new t.TokenSet,n=r,i=0,s=e.length;i=e;r--){var n=this.uncheckedNodes[r],i=n.child.toString();i in this.minimizedNodes?n.parent.edges[n.char]=this.minimizedNodes[i]:(n.child._str=i,this.minimizedNodes[i]=n.child),this.uncheckedNodes.pop()}};t.Index=function(e){this.invertedIndex=e.invertedIndex,this.fieldVectors=e.fieldVectors,this.tokenSet=e.tokenSet,this.fields=e.fields,this.pipeline=e.pipeline},t.Index.prototype.search=function(e){return this.query(function(r){var n=new t.QueryParser(e,r);n.parse()})},t.Index.prototype.query=function(e){for(var r=new t.Query(this.fields),n=Object.create(null),i=Object.create(null),s=Object.create(null),o=Object.create(null),a=Object.create(null),u=0;u1?this._b=1:this._b=e},t.Builder.prototype.k1=function(e){this._k1=e},t.Builder.prototype.add=function(e,r){var n=e[this._ref],i=Object.keys(this._fields);this._documents[n]=r||{},this.documentCount+=1;for(var s=0;s=this.length)return t.QueryLexer.EOS;var e=this.str.charAt(this.pos);return this.pos+=1,e},t.QueryLexer.prototype.width=function(){return 
this.pos-this.start},t.QueryLexer.prototype.ignore=function(){this.start==this.pos&&(this.pos+=1),this.start=this.pos},t.QueryLexer.prototype.backup=function(){this.pos-=1},t.QueryLexer.prototype.acceptDigitRun=function(){var e,r;do e=this.next(),r=e.charCodeAt(0);while(r>47&&r<58);e!=t.QueryLexer.EOS&&this.backup()},t.QueryLexer.prototype.more=function(){return this.pos1&&(e.backup(),e.emit(t.QueryLexer.TERM)),e.ignore(),e.more())return t.QueryLexer.lexText},t.QueryLexer.lexEditDistance=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(t.QueryLexer.EDIT_DISTANCE),t.QueryLexer.lexText},t.QueryLexer.lexBoost=function(e){return e.ignore(),e.acceptDigitRun(),e.emit(t.QueryLexer.BOOST),t.QueryLexer.lexText},t.QueryLexer.lexEOS=function(e){e.width()>0&&e.emit(t.QueryLexer.TERM)},t.QueryLexer.termSeparator=t.tokenizer.separator,t.QueryLexer.lexText=function(e){for(;;){var r=e.next();if(r==t.QueryLexer.EOS)return t.QueryLexer.lexEOS;if(r.charCodeAt(0)==92){e.escapeCharacter();continue}if(r==":")return t.QueryLexer.lexField;if(r=="~")return e.backup(),e.width()>0&&e.emit(t.QueryLexer.TERM),t.QueryLexer.lexEditDistance;if(r=="^")return e.backup(),e.width()>0&&e.emit(t.QueryLexer.TERM),t.QueryLexer.lexBoost;if(r=="+"&&e.width()===1||r=="-"&&e.width()===1)return e.emit(t.QueryLexer.PRESENCE),t.QueryLexer.lexText;if(r.match(t.QueryLexer.termSeparator))return t.QueryLexer.lexTerm}},t.QueryParser=function(e,r){this.lexer=new t.QueryLexer(e),this.query=r,this.currentClause={},this.lexemeIdx=0},t.QueryParser.prototype.parse=function(){this.lexer.run(),this.lexemes=this.lexer.lexemes;for(var e=t.QueryParser.parseClause;e;)e=e(this);return this.query},t.QueryParser.prototype.peekLexeme=function(){return this.lexemes[this.lexemeIdx]},t.QueryParser.prototype.consumeLexeme=function(){var e=this.peekLexeme();return this.lexemeIdx+=1,e},t.QueryParser.prototype.nextClause=function(){var e=this.currentClause;this.query.clause(e),this.currentClause={}},t.QueryParser.parseClause=function(e){var r=e.peekLexeme();if(r!=null)switch(r.type){case t.QueryLexer.PRESENCE:return t.QueryParser.parsePresence;case t.QueryLexer.FIELD:return t.QueryParser.parseField;case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var n="expected either a field or a term, found "+r.type;throw r.str.length>=1&&(n+=" with value '"+r.str+"'"),new t.QueryParseError(n,r.start,r.end)}},t.QueryParser.parsePresence=function(e){var r=e.consumeLexeme();if(r!=null){switch(r.str){case"-":e.currentClause.presence=t.Query.presence.PROHIBITED;break;case"+":e.currentClause.presence=t.Query.presence.REQUIRED;break;default:var n="unrecognised presence operator'"+r.str+"'";throw new t.QueryParseError(n,r.start,r.end)}var i=e.peekLexeme();if(i==null){var n="expecting term or field, found nothing";throw new t.QueryParseError(n,r.start,r.end)}switch(i.type){case t.QueryLexer.FIELD:return t.QueryParser.parseField;case t.QueryLexer.TERM:return t.QueryParser.parseTerm;default:var n="expecting term or field, found '"+i.type+"'";throw new t.QueryParseError(n,i.start,i.end)}}},t.QueryParser.parseField=function(e){var r=e.consumeLexeme();if(r!=null){if(e.query.allFields.indexOf(r.str)==-1){var n=e.query.allFields.map(function(o){return"'"+o+"'"}).join(", "),i="unrecognised field '"+r.str+"', possible fields: "+n;throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.fields=[r.str];var s=e.peekLexeme();if(s==null){var i="expecting term, found nothing";throw new t.QueryParseError(i,r.start,r.end)}switch(s.type){case t.QueryLexer.TERM:return 
t.QueryParser.parseTerm;default:var i="expecting term, found '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},t.QueryParser.parseTerm=function(e){var r=e.consumeLexeme();if(r!=null){e.currentClause.term=r.str.toLowerCase(),r.str.indexOf("*")!=-1&&(e.currentClause.usePipeline=!1);var n=e.peekLexeme();if(n==null){e.nextClause();return}switch(n.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+n.type+"'";throw new t.QueryParseError(i,n.start,n.end)}}},t.QueryParser.parseEditDistance=function(e){var r=e.consumeLexeme();if(r!=null){var n=parseInt(r.str,10);if(isNaN(n)){var i="edit distance must be numeric";throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.editDistance=n;var s=e.peekLexeme();if(s==null){e.nextClause();return}switch(s.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},t.QueryParser.parseBoost=function(e){var r=e.consumeLexeme();if(r!=null){var n=parseInt(r.str,10);if(isNaN(n)){var i="boost must be numeric";throw new t.QueryParseError(i,r.start,r.end)}e.currentClause.boost=n;var s=e.peekLexeme();if(s==null){e.nextClause();return}switch(s.type){case t.QueryLexer.TERM:return e.nextClause(),t.QueryParser.parseTerm;case t.QueryLexer.FIELD:return e.nextClause(),t.QueryParser.parseField;case t.QueryLexer.EDIT_DISTANCE:return t.QueryParser.parseEditDistance;case t.QueryLexer.BOOST:return t.QueryParser.parseBoost;case t.QueryLexer.PRESENCE:return e.nextClause(),t.QueryParser.parsePresence;default:var i="Unexpected lexeme type '"+s.type+"'";throw new t.QueryParseError(i,s.start,s.end)}}},function(e,r){typeof define=="function"&&define.amd?define(r):typeof K=="object"?ee.exports=r():e.lunr=r()}(this,function(){return t})})()});var de=Pe(te());function re(t,e=document){let r=ke(t,e);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${t}" to be present`);return r}function ke(t,e=document){return e.querySelector(t)||void 0}Object.entries||(Object.entries=function(t){let e=[];for(let r of Object.keys(t))e.push([r,t[r]]);return e});Object.values||(Object.values=function(t){let e=[];for(let r of Object.keys(t))e.push(t[r]);return e});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(t,e){typeof t=="object"?(this.scrollLeft=t.left,this.scrollTop=t.top):(this.scrollLeft=t,this.scrollTop=e)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...t){let e=this.parentNode;if(e){t.length===0&&e.removeChild(this);for(let r=t.length-1;r>=0;r--){let n=t[r];typeof n=="string"?n=document.createTextNode(n):n.parentNode&&n.parentNode.removeChild(n),r?e.insertBefore(this.previousSibling,n):e.replaceChild(n,this)}}}));function ne(t){let e=new Map;for(let r of t){let[n]=r.location.split("#"),i=e.get(n);typeof 
i=="undefined"?e.set(n,r):(e.set(r.location,r),r.parent=i)}return e}function H(t,e,r){var s;e=new RegExp(e,"g");let n,i=0;do{n=e.exec(t);let o=(s=n==null?void 0:n.index)!=null?s:t.length;if(in?e(r,1,n,n=i):t.charAt(i)===">"&&(t.charAt(n+1)==="/"?--s===0&&e(r++,2,n,i+1):t.charAt(i-1)!=="/"&&s++===0&&e(r,0,n,i+1),n=i+1);i>n&&e(r,1,n,i)}function se(t,e,r,n=!1){return q([t],e,r,n).pop()}function q(t,e,r,n=!1){let i=[0];for(let s=1;s>>2&1023,c=a[0]>>>12;i.push(+(u>c)+i[i.length-1])}return t.map((s,o)=>{let a=0,u=new Map;for(let f of r.sort((g,l)=>g-l)){let g=f&1048575,l=f>>>20;if(i[l]!==o)continue;let m=u.get(l);typeof m=="undefined"&&u.set(l,m=[]),m.push(g)}if(u.size===0)return s;let c=[];for(let[f,g]of u){let l=e[f],m=l[0]>>>12,x=l[l.length-1]>>>12,v=l[l.length-1]>>>2&1023;n&&m>a&&c.push(s.slice(a,m));let d=s.slice(m,x+v);for(let y of g.sort((b,E)=>E-b)){let b=(l[y]>>>12)-m,E=(l[y]>>>2&1023)+b;d=[d.slice(0,b),"",d.slice(b,E),"",d.slice(E)].join("")}if(a=x+v,c.push(d)===2)break}return n&&a{var f;switch(i[f=o+=s]||(i[f]=[]),a){case 0:case 2:i[o].push(u<<12|c-u<<2|a);break;case 1:let g=r[n].slice(u,c);H(g,lunr.tokenizer.separator,(l,m)=>{if(typeof lunr.segmenter!="undefined"){let x=g.slice(l,m);if(/^[MHIK]$/.test(lunr.segmenter.ctype_(x))){let v=lunr.segmenter.segment(x);for(let d=0,y=0;dr){return t.trim().split(/"([^"]+)"/g).map((r,n)=>n&1?r.replace(/^\b|^(?![^\x00-\x7F]|$)|\s+/g," +"):r).join("").replace(/"|(?:^|\s+)[*+\-:^~]+(?=\s+|$)/g,"").split(/\s+/g).reduce((r,n)=>{let i=e(n);return[...r,...Array.isArray(i)?i:[i]]},[]).map(r=>/([~^]$)/.test(r)?`${r}1`:r).map(r=>/(^[+-]|[~^]\d+$)/.test(r)?r:`${r}*`).join(" ")}function ue(t){return ae(t,e=>{let r=[],n=new lunr.QueryLexer(e);n.run();for(let{type:i,str:s,start:o,end:a}of n.lexemes)switch(i){case"FIELD":["title","text","tags"].includes(s)||(e=[e.slice(0,a)," ",e.slice(a+1)].join(""));break;case"TERM":H(s,lunr.tokenizer.separator,(...u)=>{r.push([e.slice(0,o),s.slice(...u),e.slice(a)].join(""))})}return r})}function ce(t){let e=new lunr.Query(["title","text","tags"]);new lunr.QueryParser(t,e).parse();for(let n of e.clauses)n.usePipeline=!0,n.term.startsWith("*")&&(n.wildcard=lunr.Query.wildcard.LEADING,n.term=n.term.slice(1)),n.term.endsWith("*")&&(n.wildcard=lunr.Query.wildcard.TRAILING,n.term=n.term.slice(0,-1));return e.clauses}function le(t,e){var i;let r=new Set(t),n={};for(let s=0;s0;){let o=i[--s];for(let u=1;un[o]-u&&(r.add(t.slice(o,o+u)),i[s++]=o+u);let a=o+n[o];n[a]&&ar=>{if(typeof r[e]=="undefined")return;let n=[r.location,e].join(":");return t.set(n,lunr.tokenizer.table=[]),r[e]}}function Re(t,e){let[r,n]=[new Set(t),new Set(e)];return[...new Set([...r].filter(i=>!n.has(i)))]}var U=class{constructor({config:e,docs:r,options:n}){let i=Oe(this.table=new Map);this.map=ne(r),this.options=n,this.index=lunr(function(){this.metadataWhitelist=["position"],this.b(0),e.lang.length===1&&e.lang[0]!=="en"?this.use(lunr[e.lang[0]]):e.lang.length>1&&this.use(lunr.multiLanguage(...e.lang)),this.tokenizer=oe,lunr.tokenizer.separator=new RegExp(e.separator),lunr.segmenter="TinySegmenter"in lunr?new lunr.TinySegmenter:void 0;let s=Re(["trimmer","stopWordFilter","stemmer"],e.pipeline);for(let o of e.lang.map(a=>a==="en"?lunr:lunr[a]))for(let a of s)this.pipeline.remove(o[a]),this.searchPipeline.remove(o[a]);this.ref("location");for(let[o,a]of Object.entries(e.fields))this.field(o,B(_({},a),{extractor:i(o)}));for(let o of r)this.add(o,{boost:o.boost})})}search(e){if(e=e.replace(new 
RegExp("\\p{sc=Han}+","gu"),s=>[...he(s,this.index.invertedIndex)].join("* ")),e=ue(e),!e)return{items:[]};let r=ce(e).filter(s=>s.presence!==lunr.Query.presence.PROHIBITED),n=this.index.search(e).reduce((s,{ref:o,score:a,matchData:u})=>{let c=this.map.get(o);if(typeof c!="undefined"){c=_({},c),c.tags&&(c.tags=[...c.tags]);let f=le(r,Object.keys(u.metadata));for(let l of this.index.fields){if(typeof c[l]=="undefined")continue;let m=[];for(let d of Object.values(u.metadata))typeof d[l]!="undefined"&&m.push(...d[l].position);if(!m.length)continue;let x=this.table.get([c.location,l].join(":")),v=Array.isArray(c[l])?q:se;c[l]=v(c[l],x,m,l!=="text")}let g=+!c.parent+Object.values(f).filter(l=>l).length/Object.keys(f).length;s.push(B(_({},c),{score:a*(1+Z(g,2)),terms:f}))}return s},[]).sort((s,o)=>o.score-s.score).reduce((s,o)=>{let a=this.map.get(o.location);if(typeof a!="undefined"){let u=a.parent?a.parent.location:a.location;s.set(u,[...s.get(u)||[],o])}return s},new Map);for(let[s,o]of n)if(!o.find(a=>a.location===s)){let a=this.map.get(s);o.push(B(_({},a),{score:0,terms:{}}))}let i;if(this.options.suggest){let s=this.index.query(o=>{for(let a of r)o.term(a.term,{fields:["title"],presence:lunr.Query.presence.REQUIRED,wildcard:lunr.Query.wildcard.TRAILING})});i=s.length?Object.keys(s[0].matchData.metadata):[]}return _({items:[...n.values()]},typeof i!="undefined"&&{suggest:i})}};var fe;function Ie(t){return W(this,null,function*(){let e="../lunr";if(typeof parent!="undefined"&&"IFrameWorker"in parent){let n=re("script[src]"),[i]=n.src.split("/worker");e=e.replace("..",i)}let r=[];for(let n of t.lang){switch(n){case"ja":r.push(`${e}/tinyseg.js`);break;case"hi":case"th":r.push(`${e}/wordcut.js`);break}n!=="en"&&r.push(`${e}/min/lunr.${n}.min.js`)}t.lang.length>1&&r.push(`${e}/min/lunr.multi.min.js`),r.length&&(yield importScripts(`${e}/min/lunr.stemmer.support.min.js`,...r))})}function Fe(t){return W(this,null,function*(){switch(t.type){case 0:return yield Ie(t.data.config),fe=new U(t.data),{type:1};case 2:let e=t.data;try{return{type:3,data:fe.search(e)}}catch(r){return console.warn(`Invalid query: ${e} \u2013 see https://bit.ly/2s3ChXG`),console.warn(r),{type:3,data:{items:[]}}}default:throw new TypeError("Invalid message type")}})}self.lunr=de.default;addEventListener("message",t=>W(void 0,null,function*(){postMessage(yield Fe(t.data))}));})(); diff --git a/assets/js/landing.js b/assets/js/landing.js new file mode 100644 index 000000000..683146167 --- /dev/null +++ b/assets/js/landing.js @@ -0,0 +1,80 @@ +// navbar display +!function(t) { + "use strict"; + t("a.page-scroll").bind("click", function(a) { + var e = t(this) + , o = e.attr("href"); + "undefined" != typeof o && o.startsWith("#") && (t("html, body").stop().animate({ + scrollTop: t(o).offset().top - 50 + }, 1250, "easeInOutExpo"), + a.preventDefault()) + }), + t("body").scrollspy({ + target: ".navbar-fixed-top", + offset: 51 + }), + t(".navbar-collapse ul li a").click(function() { + t(".navbar-toggle:visible").click() + }), + t("#mainNav").affix({ + offset: { + top: 100 + } + }) +}(jQuery); + + +// Freshdesk widget +document.addEventListener("DOMContentLoaded", function() { + document.getElementById("nav-support").addEventListener("click", function(){ + FreshworksWidget('clear', 'ticketForm'); + FreshworksWidget('open'); + FreshworksWidget('prefill', 'ticketForm', { + system: 'Sherlock' }); + }); + [].forEach.call(document.getElementsByClassName("account"), + function(el) { + el.addEventListener("click", function() { + 
frw_account();
+            })
+        });
+});
+
+function frw_account() {
+    FreshworksWidget('hide', 'ticketForm', ['attachment-container']);
+    FreshworksWidget('prefill', 'ticketForm', {
+        subject: 'Sherlock account request',
+        type: 'Account creation',
+        confirmation: "Thanks! We'll be in touch soon.",
+        description: "Hi! \n\nI'd like to request an account on Sherlock. \n" +
+                     "I understand I'll need to get approval from a " +
+                     "sponsoring Faculty member."
+    });
+    FreshworksWidget('open');
+}
+
+// Freshworks loader stub: queues FreshworksWidget() calls made before the
+// widget script itself has loaded, so early clicks are not lost
+window.fwSettings={ 'widget_id':47000001678, };
+!function(){ if("function"!=typeof window.FreshworksWidget){
+    var n=function(){n.q.push(arguments)};
+    n.q=[],window.FreshworksWidget=n } }()
+
+
+// status widget
+var statusWidget = new Status.Widget({
+  hostname: "status.sherlock.stanford.edu",
+  selector: "#status-widget",
+  debug: true,
+  outOfOffice: true,
+  paneStatistics: false,
+  display: {
+    hideOnError: true,
+    ledOnly: true,
+    panePosition: "bottom-right",
+    outOfOffice: {
+      officeOpenHour: 18,
+      officeCloseHour: 19,
+      timezone: "America/Los_Angeles"
+    }
+  }
+});
+
diff --git a/assets/stylesheets/landing.css b/assets/stylesheets/landing.css
new file mode 100644
index 000000000..b1908b69d
--- /dev/null
+++ b/assets/stylesheets/landing.css
@@ -0,0 +1,474 @@
+:root {
+    --color-accent: #B1040E;
+    --color-background: #FFFFFF;
+    --color-muted: #777777;
+
+    --font-head: "Bangers", "Helvetica Neue",Helvetica,Arial,cursive;
+    --font-sans: "Roboto", -apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif;
+    --font-serif: "Droid Serif", "Helvetica Neue",Helvetica,Arial,serif;
+}
+
+body {
+    overflow-x: hidden;
+    font-family: "Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif;
+}
+
+#news-widget {
+    margin-left: -15px;
+    margin-bottom: 2px;
+}
+
+#status-widget {
+    margin-top: auto;
+    margin-bottom: auto;
+    margin-left: -2px;
+    padding-bottom: 3px;
+}
+#status-widget .status-widget__led {
+    z-index: 50;
+    position: relative;
+    right: 1rem;
+    height: 1rem;
+    width: 1rem;
+    border-radius: 50%
+}
+#status-widget .status-widget__pane {
+    left: 0px;
+    top: 24px;
+    z-index: 50;
+    padding: 5px;
+    border-radius: 4px
+}
+#status-widget .status-widget__pane:before {
+    display:none;
+}
+#status-widget .status-widget__pane__footer {
+    color: var(--color-accent);
+    background-color: var(--color-background);
+    letter-spacing: 0;
+    font-weight: 800;
+    text-transform: none;
+    position: relative
+}
+#status-widget .status-widget__pane__container {
+    margin: 0
+}
+#status-widget .status-widget__pane__heading,
+#status-widget .status-widget__pane__footer {
+    line-height: 40px;
+}
+#status-widget .status-widget__pane__text {
+    background-color: var(--color-background);
+    line-height: normal;
+    font-size: small
+}
+#status-widget .status-widget__issue * {
+    line-height: normal;
+    letter-spacing: normal;
+    padding: 5px 0
+}
+#status-widget .status-widget__issue__title {
+    color: var(--color-accent);
+    float: none
+}
+#status-widget .status-widget__issue__body>p {
+    font-size: 13px
+}
+#status-widget .status-widget__issue__body li {
+    display: list-item;
+    margin: 0 15px
+}
+
+body {
+    overflow-x: hidden;
+    font-family: var(--font-sans);
+    -webkit-tap-highlight-color: var(--color-accent);
+}
+
+.text-muted {
+    color: var(--color-muted);
+}
+
+.text-primary,a {
+    color: var(--color-accent);
+}
+
+p,p.large {
+    font-size: 16px
+}
+
+a.active,a:active,a:focus,a:hover {
+    color: #600e0e
+}
+
+a.account {
+    cursor: pointer
+}
+
+.img-centered {
+    margin: 0 auto
+}
+
+.bg {
+    background-color: #f2f1eb
+}
+
+.bg-light-gray {
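+    /* lighter companion to .bg above, presumably for alternating section backgrounds */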
background-color: #ddd +} + +.bg-darkest-gray { + background-color: #222 +} + +.btn-primary { + color: var(--color-background); + background-color: var(--color-accent); + border-color: var(--color-accent); +} + +.btn-primary.active,.btn-primary:active,.btn-primary:focus,.btn-primary:hover,.open .dropdown-toggle.btn-primary { + color: var(--color-background); + background-color: #600e0e; + border-color: #570d0d +} + +.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover { + background-color: var(--color-accent); + border-color: var(--color-accent); +} + +.btn-primary .badge { + color: var(--color-accent); + background-color: var(--color-background); +} + +.btn-xl { + color: var(--color-background); + background-color: var(--color-accent); + border-color: var(--color-accent); + border-radius: 3px; + font-size: 18px; + margin: 20px 40px; + padding: 20px 40px +} + +.btn-xl.active,.btn-xl:active,.btn-xl:focus,.btn-xl:hover,.open .dropdown-toggle.btn-xl { + color: var(--color-background); + background-color: #600e0e; + border-color: #570d0d +} + +.btn-xl.disabled,.btn-xl.disabled.active,.btn-xl.disabled:active,.btn-xl.disabled:focus,.btn-xl.disabled:hover,.btn-xl[disabled],.btn-xl[disabled].active,.btn-xl[disabled]:active,.btn-xl[disabled]:focus,.btn-xl[disabled]:hover,fieldset[disabled] .btn-xl,fieldset[disabled] .btn-xl.active,fieldset[disabled] .btn-xl:active,fieldset[disabled] .btn-xl:focus,fieldset[disabled] .btn-xl:hover { + background-color: var(--color-accent); + border-color: var(--color-accent); +} + +.btn-xl .badge { + color: var(--color-accent); + background-color: var(--color-background); +} + +.btn-primary,.btn-xl,h1,h2,h3,h4,h5,h6 { + font-family: var(--font-sans); + text-transform: uppercase; + font-weight: 700 +} + +#mainNav.affix { + background-color: #8C1515EE; + box-shadow: + 0 0 4px rgba(0, 0, 0, 0.1), + 0 4px 8px rgba(0, 0, 0, 0.2); +} + +.navbar-custom,.navbar-custom .navbar-toggle:focus,.navbar-custom .navbar-toggle:hover { + background-color: var(--color-accent); +} + +.navbar-custom { + border-color: transparent +} + +.navbar-custom .navbar-brand { + position: absolute; + left: 10px; + margin-top: -4px; + color: var(--color-background); + font-family: var(--font-head); +} + +.navbar-custom .nav li a,.navbar-custom .navbar-toggle { + font-family: var(--font-sans); + text-transform: uppercase; + color: var(--color-background); +} + +.navbar-custom .navbar-brand.active,.navbar-custom .navbar-brand:active,.navbar-custom .navbar-brand:focus,.navbar-custom .navbar-brand:hover { + color: var(--color-background); +} + +.navbar-custom .navbar-brand img { + display: inline; + margin: 0 5px 0 -2px +} + +.navbar-custom .navbar-collapse { + border-color: rgba(255,255,255,.02); +} + +.navbar-custom .navbar-toggle { + background-color: var(--color-accent); + border-color: var(--color-accent); + font-size: 14px +} + +.navbar-custom .nav li { + display: flex +} + +.navbar-custom .nav li a { + font-weight: 400; + letter-spacing: 1px +} + +.navbar-custom .nav li a:focus,.navbar-custom .nav li a:hover { + color: var(--color-background); + background-color: #761212; + border-radius: 4px; + 
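+    /* the darker background above already signals hover/focus, so the default outline is dropped below */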
outline: 0
+}
+
+.navbar-custom .navbar-nav>.active>a {
+    color: var(--color-background);
+    border-radius: 4px;
+    background-color: #761212
+}
+
+.navbar-custom .navbar-nav>.active>a:focus,.navbar-custom .navbar-nav>.active>a:hover {
+    color: var(--color-background);
+    border-radius: 4px;
+    background-color: #600e0e
+}
+
+@media (min-width: 1024px) {
+    .navbar-custom {
+        background-color:transparent;
+        padding: 20px 0;
+        -webkit-transition: padding .5s, background-color .2s;
+        -moz-transition: padding .5s, background-color .2s;
+        transition: padding .5s, background-color .2s;
+        border: none;
+    }
+
+    .navbar-custom .navbar-brand {
+        position: relative;
+        font-size: 2em;
+        -webkit-transition: all .3s;
+        -moz-transition: all .3s;
+        transition: all .3s;
+        margin-left: -15px
+    }
+
+    .navbar-custom .navbar-nav>.active>a {
+        border-radius: 3px
+    }
+
+    .navbar-separator {
+        display: block;
+        position: relative;
+        top: 15px;
+        padding: 10px 0;
+        margin: 0 10px;
+        border-right: 1px solid #d3d3d3
+    }
+
+    .navbar-custom.affix {
+        background-color: var(--color-accent);
+        padding: 10px 0
+    }
+
+    .navbar-custom.affix .navbar-brand {
+        font-size: 1.5em
+    }
+}
+
+@media only screen and (max-width: 1024px) {
+    .navbar-separator, .navbar-icon {
+        display:none
+    }
+}
+
+@media only screen and (max-width: 768px) {
+    .navbar-custom .navbar-brand {
+        margin-top:0
+    }
+}
+
+@media only screen and (max-width: 992px) {
+    .navbar-custom {
+        margin-top:0
+    }
+}
+
+header {
+    background: linear-gradient(rgba(32, 32, 32, 0.90),
+                rgba(64, 64, 64, 0.70)),
+                url('/assets/images/bg_hero.jpg');
+    background-attachment: fixed;
+    background-position: center center;
+    background-size: cover;
+    text-align: center;
+    color: var(--color-background);
+}
+
+header .intro-text {
+    padding-top: 100px;
+    padding-bottom: 50px
+}
+
+header .intro-text .intro-heading {
+    font-family: var(--font-head);
+    font-size: 50px;
+    line-height: 50px;
+    margin-bottom: 25px
+}
+
+header .intro-text .intro-subtitle {
+    font-weight: 700;
+    font-size: 26px;
+    line-height: 26px;
+    margin-bottom: 20px
+}
+
+header .intro-text .intro-content {
+    font-family: var(--font-serif);
+    font-style: italic;
+    font-size: 18px;
+    line-height: 26px;
+    padding: 10px 20px 40px
+}
+
+header .intro-text img {
+    margin: 50px 50px 15px
+}
+
+@media (min-width: 768px) {
+    header .intro-text {
+        padding-top:150px;
+        padding-bottom: 100px
+    }
+
+    header .intro-text .intro-heading {
+        font-family: var(--font-head);
+        font-size: 75px;
+        line-height: 75px;
+        margin-bottom: 50px
+    }
+
+    header .intro-text .intro-subtitle {
+        font-weight: 700;
+        font-size: 40px;
+        line-height: 40px;
+        margin-bottom: 20px
+    }
+
+    header .intro-text .intro-content {
+        font-family: var(--font-serif);
+        font-style: italic;
+        font-size: 22px;
+        line-height: 30px;
+        padding: 10px 80px 40px
+    }
+
+    header .intro-text img {
+        margin: 50px 50px 15px
+    }
+
+    section {
+        padding: 50px 0
+    }
+}
+
+section {
+    padding: 50px 0
+}
+
+section h2.section-heading {
+    font-size: 40px;
+    margin-top: 0;
+    margin-bottom: 15px
+}
+
+section h3.section-subheading {
+    font-size: 16px;
+    font-family: var(--font-serif);
+    text-transform: none;
+    font-style: italic;
+    font-weight: 400;
+    margin-top: 10px;
+    margin-bottom: 25px
+}
+
+.service-heading {
+    padding: 10px 5px;
+}
+
+.details-text {
+    line-height: 1em
+}
+
+section#services {
+    background: linear-gradient(to bottom,
+                rgba(255, 255, 255, 1.00) 50px,
+                rgba(192, 192, 192, 0.90)),
+                url('/assets/images/bg_svc.jpg');
+    background-position: center center;
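+    /* 'background-size: cover' below scales the photo to fill the section while it stays centered */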
background-size: cover; +} + +section#info { + background-color: var(--color-background); + background: linear-gradient(to bottom, + rgba(256, 256, 256, 1.00), + rgba(232, 232, 232, 1.00)); +} + +section#info .headerlink { + display: none; +} + +section#info p { + color: var(--color-muted) +} + +img::selection { + background: 0 0 +} + +img::-moz-selection { + background: 0 0 +} + +/* SU CSS overrides */ +#global-footer { + margin-top: 0 !important; +} +#global-footer .container .row { + margin: 20px 0 0 -20px !important; +} +#global-footer p.copyright { + padding-left: 0 !important; + font-size: smaller !important; +} +#bottom-logo { + margin-top:10px !important; +} +#bottom-text ul li a { + font-size: large !important; + font-weight: bold !important; +} +#bottom-text ul.small-links li a { + font-size: small !important; + font-weight: normal !important; +} diff --git a/assets/stylesheets/main.c0d16a3a.min.css b/assets/stylesheets/main.c0d16a3a.min.css new file mode 100644 index 000000000..344d70aa8 --- /dev/null +++ b/assets/stylesheets/main.c0d16a3a.min.css @@ -0,0 +1 @@ +@charset "UTF-8";html{-webkit-text-size-adjust:none;-moz-text-size-adjust:none;text-size-adjust:none;box-sizing:border-box}*,:after,:before{box-sizing:inherit}@media (prefers-reduced-motion){*,:after,:before{transition:none!important}}body{margin:0}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}hr{border:0;box-sizing:initial;display:block;height:.05rem;overflow:visible;padding:0}small{font-size:80%}sub,sup{line-height:1em}img{border-style:none}table{border-collapse:initial;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{background:#0000;border:0;font-family:inherit;font-size:inherit;margin:0;padding:0}input{border:0;outline:none}:root{--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#303fa1;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3;--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:#526cfe1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-scheme=default]{color-scheme:light}[data-md-color-scheme=default] img[src$="#gh-dark-mode-only"],[data-md-color-scheme=default] 
img[src$="#only-dark"]{display:none}:root,[data-md-color-scheme=default]{--md-hue:225deg;--md-default-fg-color:#000000de;--md-default-fg-color--light:#0000008a;--md-default-fg-color--lighter:#00000052;--md-default-fg-color--lightest:#00000012;--md-default-bg-color:#fff;--md-default-bg-color--light:#ffffffb3;--md-default-bg-color--lighter:#ffffff4d;--md-default-bg-color--lightest:#ffffff1f;--md-code-fg-color:#36464e;--md-code-bg-color:#f5f5f5;--md-code-bg-color--light:#f5f5f5b3;--md-code-bg-color--lighter:#f5f5f54d;--md-code-hl-color:#4287ff;--md-code-hl-color--light:#4287ff1a;--md-code-hl-number-color:#d52a2a;--md-code-hl-special-color:#db1457;--md-code-hl-function-color:#a846b9;--md-code-hl-constant-color:#6e59d9;--md-code-hl-keyword-color:#3f6ec6;--md-code-hl-string-color:#1c7d4d;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-default-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color);--md-typeset-del-color:#f5503d26;--md-typeset-ins-color:#0bd57026;--md-typeset-kbd-color:#fafafa;--md-typeset-kbd-accent-color:#fff;--md-typeset-kbd-border-color:#b8b8b8;--md-typeset-mark-color:#ffff0080;--md-typeset-table-color:#0000001f;--md-typeset-table-color--light:rgba(0,0,0,.035);--md-admonition-fg-color:var(--md-default-fg-color);--md-admonition-bg-color:var(--md-default-bg-color);--md-warning-fg-color:#000000de;--md-warning-bg-color:#ff9;--md-footer-fg-color:#fff;--md-footer-fg-color--light:#ffffffb3;--md-footer-fg-color--lighter:#ffffff73;--md-footer-bg-color:#000000de;--md-footer-bg-color--dark:#00000052;--md-shadow-z1:0 0.2rem 0.5rem #0000000d,0 0 0.05rem #0000001a;--md-shadow-z2:0 0.2rem 0.5rem #0000001a,0 0 0.05rem #00000040;--md-shadow-z3:0 0.2rem 0.5rem #0003,0 0 0.05rem #00000059}.md-icon svg{fill:currentcolor;display:block;height:1.2rem;width:1.2rem}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;--md-text-font-family:var(--md-text-font,_),-apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif;--md-code-font-family:var(--md-code-font,_),SFMono-Regular,Consolas,Menlo,monospace}aside,body,input{font-feature-settings:"kern","liga";color:var(--md-typeset-color);font-family:var(--md-text-font-family)}code,kbd,pre{font-feature-settings:"kern";font-family:var(--md-code-font-family)}:root{--md-typeset-table-sort-icon:url('data:image/svg+xml;charset=utf-8,');--md-typeset-table-sort-icon--asc:url('data:image/svg+xml;charset=utf-8,');--md-typeset-table-sort-icon--desc:url('data:image/svg+xml;charset=utf-8,')}.md-typeset{-webkit-print-color-adjust:exact;color-adjust:exact;font-size:.8rem;line-height:1.6}@media print{.md-typeset{font-size:.68rem}}.md-typeset blockquote,.md-typeset dl,.md-typeset figure,.md-typeset ol,.md-typeset pre,.md-typeset ul{margin-bottom:1em;margin-top:1em}.md-typeset h1{color:var(--md-default-fg-color--light);font-size:2em;line-height:1.3;margin:0 0 1.25em}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{font-size:1.5625em;line-height:1.4;margin:1.6em 0 .64em}.md-typeset h3{font-size:1.25em;font-weight:400;letter-spacing:-.01em;line-height:1.5;margin:1.6em 0 .8em}.md-typeset h2+h3{margin-top:.8em}.md-typeset h4{font-weight:700;letter-spacing:-.01em;margin:1em 0}.md-typeset 
h5,.md-typeset h6{color:var(--md-default-fg-color--light);font-size:.8em;font-weight:700;letter-spacing:-.01em;margin:1.25em 0}.md-typeset h5{text-transform:uppercase}.md-typeset hr{border-bottom:.05rem solid var(--md-default-fg-color--lightest);display:flow-root;margin:1.5em 0}.md-typeset a{color:var(--md-typeset-a-color);word-break:break-word}.md-typeset a,.md-typeset a:before{transition:color 125ms}.md-typeset a:focus,.md-typeset a:hover{color:var(--md-accent-fg-color)}.md-typeset a:focus code,.md-typeset a:hover code{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-typeset a code{color:var(--md-typeset-a-color)}.md-typeset a.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-typeset code,.md-typeset kbd,.md-typeset pre{color:var(--md-code-fg-color);direction:ltr;font-variant-ligatures:none;transition:background-color 125ms}@media print{.md-typeset code,.md-typeset kbd,.md-typeset pre{white-space:pre-wrap}}.md-typeset code{background-color:var(--md-code-bg-color);border-radius:.1rem;-webkit-box-decoration-break:clone;box-decoration-break:clone;font-size:.85em;padding:0 .2941176471em;transition:color 125ms,background-color 125ms;word-break:break-word}.md-typeset code:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}.md-typeset pre{display:flow-root;line-height:1.4;position:relative}.md-typeset pre>code{-webkit-box-decoration-break:slice;box-decoration-break:slice;box-shadow:none;display:block;margin:0;outline-color:var(--md-accent-fg-color);overflow:auto;padding:.7720588235em 1.1764705882em;scrollbar-color:var(--md-default-fg-color--lighter) #0000;scrollbar-width:thin;touch-action:auto;word-break:normal}.md-typeset pre>code:hover{scrollbar-color:var(--md-accent-fg-color) #0000}.md-typeset pre>code::-webkit-scrollbar{height:.2rem;width:.2rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}.md-typeset kbd{background-color:var(--md-typeset-kbd-color);border-radius:.1rem;box-shadow:0 .1rem 0 .05rem var(--md-typeset-kbd-border-color),0 .1rem 0 var(--md-typeset-kbd-border-color),0 -.1rem .2rem var(--md-typeset-kbd-accent-color) inset;color:var(--md-default-fg-color);display:inline-block;font-size:.75em;padding:0 .6666666667em;vertical-align:text-top;word-break:break-word}.md-typeset mark{background-color:var(--md-typeset-mark-color);-webkit-box-decoration-break:clone;box-decoration-break:clone;color:inherit;word-break:break-word}.md-typeset abbr{cursor:help;text-decoration:none}.md-typeset [data-preview],.md-typeset abbr{border-bottom:.05rem dotted var(--md-default-fg-color--light)}.md-typeset small{opacity:.75}[dir=ltr] .md-typeset sub,[dir=ltr] .md-typeset sup{margin-left:.078125em}[dir=rtl] .md-typeset sub,[dir=rtl] .md-typeset sup{margin-right:.078125em}[dir=ltr] .md-typeset blockquote{padding-left:.6rem}[dir=rtl] .md-typeset blockquote{padding-right:.6rem}[dir=ltr] .md-typeset blockquote{border-left:.2rem solid var(--md-default-fg-color--lighter)}[dir=rtl] .md-typeset blockquote{border-right:.2rem solid var(--md-default-fg-color--lighter)}.md-typeset blockquote{color:var(--md-default-fg-color--light);margin-left:0;margin-right:0}.md-typeset ul{list-style-type:disc}[dir=ltr] .md-typeset ol,[dir=ltr] .md-typeset ul{margin-left:.625em}[dir=rtl] .md-typeset ol,[dir=rtl] .md-typeset ul{margin-right:.625em}.md-typeset ol,.md-typeset ul{padding:0}.md-typeset 
ol:not([hidden]),.md-typeset ul:not([hidden]){display:flow-root}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}[dir=ltr] .md-typeset ol li,[dir=ltr] .md-typeset ul li{margin-left:1.25em}[dir=rtl] .md-typeset ol li,[dir=rtl] .md-typeset ul li{margin-right:1.25em}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}[dir=ltr] .md-typeset ol li ol,[dir=ltr] .md-typeset ol li ul,[dir=ltr] .md-typeset ul li ol,[dir=ltr] .md-typeset ul li ul{margin-left:.625em}[dir=rtl] .md-typeset ol li ol,[dir=rtl] .md-typeset ol li ul,[dir=rtl] .md-typeset ul li ol,[dir=rtl] .md-typeset ul li ul{margin-right:.625em}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin-bottom:.5em;margin-top:.5em}[dir=ltr] .md-typeset dd{margin-left:1.875em}[dir=rtl] .md-typeset dd{margin-right:1.875em}.md-typeset dd{margin-bottom:1.5em;margin-top:1em}.md-typeset img,.md-typeset svg,.md-typeset video{height:auto;max-width:100%}.md-typeset img[align=left]{margin:1em 1em 1em 0}.md-typeset img[align=right]{margin:1em 0 1em 1em}.md-typeset img[align]:only-child{margin-top:0}.md-typeset figure{display:flow-root;margin:1em auto;max-width:100%;text-align:center;width:-moz-fit-content;width:fit-content}.md-typeset figure img{display:block;margin:0 auto}.md-typeset figcaption{font-style:italic;margin:1em auto;max-width:24rem}.md-typeset iframe{max-width:100%}.md-typeset table:not([class]){background-color:var(--md-default-bg-color);border:.05rem solid var(--md-typeset-table-color);border-radius:.1rem;display:inline-block;font-size:.64rem;max-width:100%;overflow:auto;touch-action:auto}@media print{.md-typeset table:not([class]){display:table}}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td>:first-child,.md-typeset table:not([class]) th>:first-child{margin-top:0}.md-typeset table:not([class]) td>:last-child,.md-typeset table:not([class]) th>:last-child{margin-bottom:0}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}[dir=rtl] .md-typeset table:not([class]) td:not([align]),[dir=rtl] .md-typeset table:not([class]) th:not([align]){text-align:right}.md-typeset table:not([class]) th{font-weight:700;min-width:5rem;padding:.9375em 1.25em;vertical-align:top}.md-typeset table:not([class]) td{border-top:.05rem solid var(--md-typeset-table-color);padding:.9375em 1.25em;vertical-align:top}.md-typeset table:not([class]) tbody tr{transition:background-color 125ms}.md-typeset table:not([class]) tbody tr:hover{background-color:var(--md-typeset-table-color--light);box-shadow:0 .05rem 0 var(--md-default-bg-color) inset}.md-typeset table:not([class]) a{word-break:normal}.md-typeset table th[role=columnheader]{cursor:pointer}[dir=ltr] .md-typeset table th[role=columnheader]:after{margin-left:.5em}[dir=rtl] .md-typeset table th[role=columnheader]:after{margin-right:.5em}.md-typeset table th[role=columnheader]:after{content:"";display:inline-block;height:1.2em;-webkit-mask-image:var(--md-typeset-table-sort-icon);mask-image:var(--md-typeset-table-sort-icon);-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:background-color 125ms;vertical-align:text-bottom;width:1.2em}.md-typeset table 
th[role=columnheader]:hover:after{background-color:var(--md-default-fg-color--lighter)}.md-typeset table th[role=columnheader][aria-sort=ascending]:after{background-color:var(--md-default-fg-color--light);-webkit-mask-image:var(--md-typeset-table-sort-icon--asc);mask-image:var(--md-typeset-table-sort-icon--asc)}.md-typeset table th[role=columnheader][aria-sort=descending]:after{background-color:var(--md-default-fg-color--light);-webkit-mask-image:var(--md-typeset-table-sort-icon--desc);mask-image:var(--md-typeset-table-sort-icon--desc)}.md-typeset__scrollwrap{margin:1em -.8rem;overflow-x:auto;touch-action:auto}.md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 .8rem}@media print{.md-typeset__table{display:block}}html .md-typeset__table table{display:table;margin:0;overflow:hidden;width:100%}@media screen and (max-width:44.984375em){.md-content__inner>pre{margin:1em -.8rem}.md-content__inner>pre code{border-radius:0}}.md-typeset .md-author{border-radius:100%;display:block;flex-shrink:0;height:1.6rem;overflow:hidden;position:relative;transition:color 125ms,transform 125ms;width:1.6rem}.md-typeset .md-author img{display:block}.md-typeset .md-author--more{background:var(--md-default-fg-color--lightest);color:var(--md-default-fg-color--lighter);font-size:.6rem;font-weight:700;line-height:1.6rem;text-align:center}.md-typeset .md-author--long{height:2.4rem;width:2.4rem}.md-typeset a.md-author{transform:scale(1)}.md-typeset a.md-author img{border-radius:100%;filter:grayscale(100%) opacity(75%);transition:filter 125ms}.md-typeset a.md-author:focus,.md-typeset a.md-author:hover{transform:scale(1.1);z-index:1}.md-typeset a.md-author:focus img,.md-typeset a.md-author:hover img{filter:grayscale(0)}.md-banner{background-color:var(--md-footer-bg-color);color:var(--md-footer-fg-color);overflow:auto}@media print{.md-banner{display:none}}.md-banner--warning{background-color:var(--md-warning-bg-color);color:var(--md-warning-fg-color)}.md-banner__inner{font-size:.7rem;margin:.6rem auto;padding:0 .8rem}[dir=ltr] .md-banner__button{float:right}[dir=rtl] .md-banner__button{float:left}.md-banner__button{color:inherit;cursor:pointer;transition:opacity .25s}.no-js .md-banner__button{display:none}.md-banner__button:hover{opacity:.7}html{font-size:125%;height:100%;overflow-x:hidden}@media screen and (min-width:100em){html{font-size:137.5%}}@media screen and (min-width:125em){html{font-size:150%}}body{background-color:var(--md-default-bg-color);display:flex;flex-direction:column;font-size:.5rem;min-height:100%;position:relative;width:100%}@media print{body{display:block}}@media screen and (max-width:59.984375em){body[data-md-scrolllock]{position:fixed}}.md-grid{margin-left:auto;margin-right:auto;max-width:61rem}.md-container{display:flex;flex-direction:column;flex-grow:1}@media print{.md-container{display:block}}.md-main{flex-grow:1}.md-main__inner{display:flex;height:100%;margin-top:1.5rem}.md-ellipsis{overflow:hidden;text-overflow:ellipsis}.md-toggle{display:none}.md-option{height:0;opacity:0;position:absolute;width:0}.md-option:checked+label:not([hidden]){display:block}.md-option.focus-visible+label{outline-color:var(--md-accent-fg-color);outline-style:auto}.md-skip{background-color:var(--md-default-fg-color);border-radius:.1rem;color:var(--md-default-bg-color);font-size:.64rem;margin:.5rem;opacity:0;outline-color:var(--md-accent-fg-color);padding:.3rem .5rem;position:fixed;transform:translateY(.4rem);z-index:-1}.md-skip:focus{opacity:1;transform:translateY(0);transition:transform .25s 
cubic-bezier(.4,0,.2,1),opacity 175ms 75ms;z-index:10}@page{margin:25mm}:root{--md-clipboard-icon:url('data:image/svg+xml;charset=utf-8,')}.md-clipboard{border-radius:.1rem;color:var(--md-default-fg-color--lightest);cursor:pointer;height:1.5em;outline-color:var(--md-accent-fg-color);outline-offset:.1rem;transition:color .25s;width:1.5em;z-index:1}@media print{.md-clipboard{display:none}}.md-clipboard:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}:hover>.md-clipboard{color:var(--md-default-fg-color--light)}.md-clipboard:focus,.md-clipboard:hover{color:var(--md-accent-fg-color)}.md-clipboard:after{background-color:currentcolor;content:"";display:block;height:1.125em;margin:0 auto;-webkit-mask-image:var(--md-clipboard-icon);mask-image:var(--md-clipboard-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:1.125em}.md-clipboard--inline{cursor:pointer}.md-clipboard--inline code{transition:color .25s,background-color .25s}.md-clipboard--inline:focus code,.md-clipboard--inline:hover code{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}:root{--md-code-select-icon:url('data:image/svg+xml;charset=utf-8,');--md-code-copy-icon:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .md-code__content{display:grid}.md-code__nav{background-color:var(--md-code-bg-color--lighter);border-radius:.1rem;display:flex;gap:.2rem;padding:.2rem;position:absolute;right:.25em;top:.25em;transition:background-color .25s;z-index:1}:hover>.md-code__nav{background-color:var(--md-code-bg-color--light)}.md-code__button{color:var(--md-default-fg-color--lightest);cursor:pointer;display:block;height:1.5em;outline-color:var(--md-accent-fg-color);outline-offset:.1rem;transition:color .25s;width:1.5em}:hover>*>.md-code__button{color:var(--md-default-fg-color--light)}.md-code__button.focus-visible,.md-code__button:hover{color:var(--md-accent-fg-color)}.md-code__button--active{color:var(--md-default-fg-color)!important}.md-code__button:after{background-color:currentcolor;content:"";display:block;height:1.125em;margin:0 auto;-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:1.125em}.md-code__button[data-md-type=select]:after{-webkit-mask-image:var(--md-code-select-icon);mask-image:var(--md-code-select-icon)}.md-code__button[data-md-type=copy]:after{-webkit-mask-image:var(--md-code-copy-icon);mask-image:var(--md-code-copy-icon)}@keyframes consent{0%{opacity:0;transform:translateY(100%)}to{opacity:1;transform:translateY(0)}}@keyframes overlay{0%{opacity:0}to{opacity:1}}.md-consent__overlay{animation:overlay .25s both;-webkit-backdrop-filter:blur(.1rem);backdrop-filter:blur(.1rem);background-color:#0000008a;height:100%;opacity:1;position:fixed;top:0;width:100%;z-index:5}.md-consent__inner{animation:consent .5s cubic-bezier(.1,.7,.1,1) both;background-color:var(--md-default-bg-color);border:0;border-radius:.1rem;bottom:0;box-shadow:0 0 .2rem #0000001a,0 .2rem .4rem #0003;max-height:100%;overflow:auto;padding:0;position:fixed;width:100%;z-index:5}.md-consent__form{padding:.8rem}.md-consent__settings{display:none;margin:1em 0}input:checked+.md-consent__settings{display:block}.md-consent__controls{margin-bottom:.8rem}.md-typeset .md-consent__controls .md-button{display:inline}@media screen and (max-width:44.984375em){.md-typeset .md-consent__controls 
.md-button{display:block;margin-top:.4rem;text-align:center;width:100%}}.md-consent label{cursor:pointer}.md-content{flex-grow:1;min-width:0}.md-content__inner{margin:0 .8rem 1.2rem;padding-top:.6rem}@media screen and (min-width:76.25em){[dir=ltr] .md-sidebar--primary:not([hidden])~.md-content>.md-content__inner{margin-left:1.2rem}[dir=ltr] .md-sidebar--secondary:not([hidden])~.md-content>.md-content__inner,[dir=rtl] .md-sidebar--primary:not([hidden])~.md-content>.md-content__inner{margin-right:1.2rem}[dir=rtl] .md-sidebar--secondary:not([hidden])~.md-content>.md-content__inner{margin-left:1.2rem}}.md-content__inner:before{content:"";display:block;height:.4rem}.md-content__inner>:last-child{margin-bottom:0}[dir=ltr] .md-content__button{float:right}[dir=rtl] .md-content__button{float:left}[dir=ltr] .md-content__button{margin-left:.4rem}[dir=rtl] .md-content__button{margin-right:.4rem}.md-content__button{margin:.4rem 0;padding:0}@media print{.md-content__button{display:none}}.md-typeset .md-content__button{color:var(--md-default-fg-color--lighter)}.md-content__button svg{display:inline;vertical-align:top}[dir=rtl] .md-content__button svg{transform:scaleX(-1)}[dir=ltr] .md-dialog{right:.8rem}[dir=rtl] .md-dialog{left:.8rem}.md-dialog{background-color:var(--md-default-fg-color);border-radius:.1rem;bottom:.8rem;box-shadow:var(--md-shadow-z3);min-width:11.1rem;opacity:0;padding:.4rem .6rem;pointer-events:none;position:fixed;transform:translateY(100%);transition:transform 0ms .4s,opacity .4s;z-index:4}@media print{.md-dialog{display:none}}.md-dialog--active{opacity:1;pointer-events:auto;transform:translateY(0);transition:transform .4s cubic-bezier(.075,.85,.175,1),opacity .4s}.md-dialog__inner{color:var(--md-default-bg-color);font-size:.7rem}.md-feedback{margin:2em 0 1em;text-align:center}.md-feedback fieldset{border:none;margin:0;padding:0}.md-feedback__title{font-weight:700;margin:1em auto}.md-feedback__inner{position:relative}.md-feedback__list{display:flex;flex-wrap:wrap;place-content:baseline center;position:relative}.md-feedback__list:hover .md-icon:not(:disabled){color:var(--md-default-fg-color--lighter)}:disabled .md-feedback__list{min-height:1.8rem}.md-feedback__icon{color:var(--md-default-fg-color--light);cursor:pointer;flex-shrink:0;margin:0 .1rem;transition:color 125ms}.md-feedback__icon:not(:disabled).md-icon:hover{color:var(--md-accent-fg-color)}.md-feedback__icon:disabled{color:var(--md-default-fg-color--lightest);pointer-events:none}.md-feedback__note{opacity:0;position:relative;transform:translateY(.4rem);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s}.md-feedback__note>*{margin:0 auto;max-width:16rem}:disabled .md-feedback__note{opacity:1;transform:translateY(0)}.md-footer{background-color:var(--md-footer-bg-color);color:var(--md-footer-fg-color)}@media print{.md-footer{display:none}}.md-footer__inner{justify-content:space-between;overflow:auto;padding:.2rem}.md-footer__inner:not([hidden]){display:flex}.md-footer__link{align-items:end;display:flex;flex-grow:0.01;margin-bottom:.4rem;margin-top:1rem;max-width:100%;outline-color:var(--md-accent-fg-color);overflow:hidden;transition:opacity .25s}.md-footer__link:focus,.md-footer__link:hover{opacity:.7}[dir=rtl] .md-footer__link svg{transform:scaleX(-1)}@media screen and (max-width:44.984375em){.md-footer__link--prev{flex-shrink:0}.md-footer__link--prev .md-footer__title{display:none}}[dir=ltr] .md-footer__link--next{margin-left:auto}[dir=rtl] 
.md-footer__link--next{margin-right:auto}.md-footer__link--next{text-align:right}[dir=rtl] .md-footer__link--next{text-align:left}.md-footer__title{flex-grow:1;font-size:.9rem;margin-bottom:.7rem;max-width:calc(100% - 2.4rem);padding:0 1rem;white-space:nowrap}.md-footer__button{margin:.2rem;padding:.4rem}.md-footer__direction{font-size:.64rem;opacity:.7}.md-footer-meta{background-color:var(--md-footer-bg-color--dark)}.md-footer-meta__inner{display:flex;flex-wrap:wrap;justify-content:space-between;padding:.2rem}html .md-footer-meta.md-typeset a{color:var(--md-footer-fg-color--light)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:var(--md-footer-fg-color)}.md-copyright{color:var(--md-footer-fg-color--lighter);font-size:.64rem;margin:auto .6rem;padding:.4rem 0;width:100%}@media screen and (min-width:45em){.md-copyright{width:auto}}.md-copyright__highlight{color:var(--md-footer-fg-color--light)}.md-social{display:inline-flex;gap:.2rem;margin:0 .4rem;padding:.2rem 0 .6rem}@media screen and (min-width:45em){.md-social{padding:.6rem 0}}.md-social__link{display:inline-block;height:1.6rem;text-align:center;width:1.6rem}.md-social__link:before{line-height:1.9}.md-social__link svg{fill:currentcolor;max-height:.8rem;vertical-align:-25%}.md-typeset .md-button{border:.1rem solid;border-radius:.1rem;color:var(--md-primary-fg-color);cursor:pointer;display:inline-block;font-weight:700;padding:.625em 2em;transition:color 125ms,background-color 125ms,border-color 125ms}.md-typeset .md-button--primary{background-color:var(--md-primary-fg-color);border-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color)}.md-typeset .md-button:focus,.md-typeset .md-button:hover{background-color:var(--md-accent-fg-color);border-color:var(--md-accent-fg-color);color:var(--md-accent-bg-color)}[dir=ltr] .md-typeset .md-input{border-top-left-radius:.1rem}[dir=ltr] .md-typeset .md-input,[dir=rtl] .md-typeset .md-input{border-top-right-radius:.1rem}[dir=rtl] .md-typeset .md-input{border-top-left-radius:.1rem}.md-typeset .md-input{border-bottom:.1rem solid var(--md-default-fg-color--lighter);box-shadow:var(--md-shadow-z1);font-size:.8rem;height:1.8rem;padding:0 .6rem;transition:border .25s,box-shadow .25s}.md-typeset .md-input:focus,.md-typeset .md-input:hover{border-bottom-color:var(--md-accent-fg-color);box-shadow:var(--md-shadow-z2)}.md-typeset .md-input--stretch{width:100%}.md-header{background-color:var(--md-primary-fg-color);box-shadow:0 0 .2rem #0000,0 .2rem .4rem #0000;color:var(--md-primary-bg-color);display:block;left:0;position:sticky;right:0;top:0;z-index:4}@media print{.md-header{display:none}}.md-header[hidden]{transform:translateY(-100%);transition:transform .25s cubic-bezier(.8,0,.6,1),box-shadow .25s}.md-header--shadow{box-shadow:0 0 .2rem #0000001a,0 .2rem .4rem #0003;transition:transform .25s cubic-bezier(.1,.7,.1,1),box-shadow .25s}.md-header__inner{align-items:center;display:flex;padding:0 .2rem}.md-header__button{color:currentcolor;cursor:pointer;margin:.2rem;outline-color:var(--md-accent-fg-color);padding:.4rem;position:relative;transition:opacity .25s;vertical-align:middle;z-index:1}.md-header__button:hover{opacity:.7}.md-header__button:not([hidden]){display:inline-block}.md-header__button:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}.md-header__button.md-logo{margin:.2rem;padding:.4rem}@media screen and (max-width:76.234375em){.md-header__button.md-logo{display:none}}.md-header__button.md-logo img,.md-header__button.md-logo 
svg{fill:currentcolor;display:block;height:1.2rem;width:auto}@media screen and (min-width:60em){.md-header__button[for=__search]{display:none}}.no-js .md-header__button[for=__search]{display:none}[dir=rtl] .md-header__button[for=__search] svg{transform:scaleX(-1)}@media screen and (min-width:76.25em){.md-header__button[for=__drawer]{display:none}}.md-header__topic{display:flex;max-width:100%;position:absolute;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;white-space:nowrap}.md-header__topic+.md-header__topic{opacity:0;pointer-events:none;transform:translateX(1.25rem);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;z-index:-1}[dir=rtl] .md-header__topic+.md-header__topic{transform:translateX(-1.25rem)}.md-header__topic:first-child{font-weight:700}[dir=ltr] .md-header__title{margin-left:1rem;margin-right:.4rem}[dir=rtl] .md-header__title{margin-left:.4rem;margin-right:1rem}.md-header__title{flex-grow:1;font-size:.9rem;height:2.4rem;line-height:2.4rem}.md-header__title--active .md-header__topic{opacity:0;pointer-events:none;transform:translateX(-1.25rem);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;z-index:-1}[dir=rtl] .md-header__title--active .md-header__topic{transform:translateX(1.25rem)}.md-header__title--active .md-header__topic+.md-header__topic{opacity:1;pointer-events:auto;transform:translateX(0);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;z-index:0}.md-header__title>.md-header__ellipsis{height:100%;position:relative;width:100%}.md-header__option{display:flex;flex-shrink:0;max-width:100%;transition:max-width 0ms .25s,opacity .25s .25s;white-space:nowrap}[data-md-toggle=search]:checked~.md-header .md-header__option{max-width:0;opacity:0;transition:max-width 0ms,opacity 0ms}.md-header__option>input{bottom:0}.md-header__source{display:none}@media screen and (min-width:60em){[dir=ltr] .md-header__source{margin-left:1rem}[dir=rtl] .md-header__source{margin-right:1rem}.md-header__source{display:block;max-width:11.7rem;width:11.7rem}}@media screen and (min-width:76.25em){[dir=ltr] .md-header__source{margin-left:1.4rem}[dir=rtl] .md-header__source{margin-right:1.4rem}}.md-meta{color:var(--md-default-fg-color--light);font-size:.7rem;line-height:1.3}.md-meta__list{display:inline-flex;flex-wrap:wrap;list-style:none;margin:0;padding:0}.md-meta__item:not(:last-child):after{content:"·";margin-left:.2rem;margin-right:.2rem}.md-meta__link{color:var(--md-typeset-a-color)}.md-meta__link:focus,.md-meta__link:hover{color:var(--md-accent-fg-color)}.md-draft{background-color:#ff1744;border-radius:.125em;color:#fff;display:inline-block;font-weight:700;padding-left:.5714285714em;padding-right:.5714285714em}:root{--md-nav-icon--prev:url('data:image/svg+xml;charset=utf-8,');--md-nav-icon--next:url('data:image/svg+xml;charset=utf-8,');--md-toc-icon:url('data:image/svg+xml;charset=utf-8,')}.md-nav{font-size:.7rem;line-height:1.3}.md-nav__title{color:var(--md-default-fg-color--light);display:block;font-weight:700;overflow:hidden;padding:0 .6rem;text-overflow:ellipsis}.md-nav__title .md-nav__button{display:none}.md-nav__title .md-nav__button img{height:100%;width:auto}.md-nav__title .md-nav__button.md-logo img,.md-nav__title .md-nav__button.md-logo svg{fill:currentcolor;display:block;height:2.4rem;max-width:100%;object-fit:contain;width:auto}.md-nav__list{list-style:none;margin:0;padding:0}.md-nav__link{align-items:flex-start;display:flex;gap:.4rem;margin-top:.625em;scroll-snap-align:start;transition:color 
125ms}.md-nav__link--passed,.md-nav__link--passed code{color:var(--md-default-fg-color--light)}.md-nav__item .md-nav__link--active,.md-nav__item .md-nav__link--active code{color:var(--md-typeset-a-color)}.md-nav__link .md-ellipsis{position:relative}.md-nav__link .md-ellipsis code{word-break:normal}[dir=ltr] .md-nav__link .md-icon:last-child{margin-left:auto}[dir=rtl] .md-nav__link .md-icon:last-child{margin-right:auto}.md-nav__link .md-typeset{font-size:.7rem;line-height:1.3}.md-nav__link svg{fill:currentcolor;flex-shrink:0;height:1.3em}.md-nav__link[for]:focus,.md-nav__link[for]:hover,.md-nav__link[href]:focus,.md-nav__link[href]:hover{color:var(--md-accent-fg-color);cursor:pointer}.md-nav__link[for]:focus code,.md-nav__link[for]:hover code,.md-nav__link[href]:focus code,.md-nav__link[href]:hover code{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-nav__link.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-nav--primary .md-nav__link[for=__toc]{display:none}.md-nav--primary .md-nav__link[for=__toc] .md-icon:after{background-color:currentcolor;display:block;height:100%;-webkit-mask-image:var(--md-toc-icon);mask-image:var(--md-toc-icon);width:100%}.md-nav--primary .md-nav__link[for=__toc]~.md-nav{display:none}.md-nav__container>.md-nav__link{margin-top:0}.md-nav__container>.md-nav__link:first-child{flex-grow:1;min-width:0}.md-nav__icon{flex-shrink:0}.md-nav__source{display:none}@media screen and (max-width:76.234375em){.md-nav--primary,.md-nav--primary .md-nav{background-color:var(--md-default-bg-color);display:flex;flex-direction:column;height:100%;left:0;position:absolute;right:0;top:0;z-index:1}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:.8rem;line-height:1.5}.md-nav--primary .md-nav__title{background-color:var(--md-default-fg-color--lightest);color:var(--md-default-fg-color--light);cursor:pointer;height:5.6rem;line-height:2.4rem;padding:3rem .8rem .2rem;position:relative;white-space:nowrap}[dir=ltr] .md-nav--primary .md-nav__title .md-nav__icon{left:.4rem}[dir=rtl] .md-nav--primary .md-nav__title .md-nav__icon{right:.4rem}.md-nav--primary .md-nav__title .md-nav__icon{display:block;height:1.2rem;margin:.2rem;position:absolute;top:.4rem;width:1.2rem}.md-nav--primary .md-nav__title .md-nav__icon:after{background-color:currentcolor;content:"";display:block;height:100%;-webkit-mask-image:var(--md-nav-icon--prev);mask-image:var(--md-nav-icon--prev);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}.md-nav--primary .md-nav__title~.md-nav__list{background-color:var(--md-default-bg-color);box-shadow:0 .05rem 0 var(--md-default-fg-color--lightest) inset;overflow-y:auto;scroll-snap-type:y mandatory;touch-action:pan-y}.md-nav--primary .md-nav__title~.md-nav__list>:first-child{border-top:0}.md-nav--primary .md-nav__title[for=__drawer]{background-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color);font-weight:700}.md-nav--primary .md-nav__title .md-logo{display:block;left:.2rem;margin:.2rem;padding:.4rem;position:absolute;right:.2rem;top:.2rem}.md-nav--primary .md-nav__list{flex:1}.md-nav--primary .md-nav__item{border-top:.05rem solid var(--md-default-fg-color--lightest)}.md-nav--primary .md-nav__item--active>.md-nav__link{color:var(--md-typeset-a-color)}.md-nav--primary .md-nav__item--active>.md-nav__link:focus,.md-nav--primary 
.md-nav__item--active>.md-nav__link:hover{color:var(--md-accent-fg-color)}.md-nav--primary .md-nav__link{margin-top:0;padding:.6rem .8rem}.md-nav--primary .md-nav__link svg{margin-top:.1em}.md-nav--primary .md-nav__link>.md-nav__link{padding:0}[dir=ltr] .md-nav--primary .md-nav__link .md-nav__icon{margin-right:-.2rem}[dir=rtl] .md-nav--primary .md-nav__link .md-nav__icon{margin-left:-.2rem}.md-nav--primary .md-nav__link .md-nav__icon{font-size:1.2rem;height:1.2rem;width:1.2rem}.md-nav--primary .md-nav__link .md-nav__icon:after{background-color:currentcolor;content:"";display:block;height:100%;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}[dir=rtl] .md-nav--primary .md-nav__icon:after{transform:scale(-1)}.md-nav--primary .md-nav--secondary .md-nav{background-color:initial;position:static}[dir=ltr] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:1.4rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-right:1.4rem}[dir=ltr] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-right:2rem}[dir=ltr] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:2.6rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-right:2.6rem}[dir=ltr] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:3.2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-right:3.2rem}.md-nav--secondary{background-color:initial}.md-nav__toggle~.md-nav{display:flex;opacity:0;transform:translateX(100%);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity 125ms 50ms}[dir=rtl] .md-nav__toggle~.md-nav{transform:translateX(-100%)}.md-nav__toggle:checked~.md-nav{opacity:1;transform:translateX(0);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity 125ms 125ms}.md-nav__toggle:checked~.md-nav>.md-nav__list{-webkit-backface-visibility:hidden;backface-visibility:hidden}}@media screen and (max-width:59.984375em){.md-nav--primary .md-nav__link[for=__toc]{display:flex}.md-nav--primary .md-nav__link[for=__toc] .md-icon:after{content:""}.md-nav--primary .md-nav__link[for=__toc]+.md-nav__link{display:none}.md-nav--primary .md-nav__link[for=__toc]~.md-nav{display:flex}.md-nav__source{background-color:var(--md-primary-fg-color--dark);color:var(--md-primary-bg-color);display:block;padding:0 .2rem}}@media screen and (min-width:60em) and (max-width:76.234375em){.md-nav--integrated .md-nav__link[for=__toc]{display:flex}.md-nav--integrated .md-nav__link[for=__toc] .md-icon:after{content:""}.md-nav--integrated .md-nav__link[for=__toc]+.md-nav__link{display:none}.md-nav--integrated .md-nav__link[for=__toc]~.md-nav{display:flex}}@media screen and (min-width:60em){.md-nav{margin-bottom:-.4rem}.md-nav--secondary .md-nav__title{background:var(--md-default-bg-color);box-shadow:0 0 .4rem .4rem var(--md-default-bg-color);position:sticky;top:0;z-index:1}.md-nav--secondary .md-nav__title[for=__toc]{scroll-snap-align:start}.md-nav--secondary .md-nav__title .md-nav__icon{display:none}[dir=ltr] .md-nav--secondary .md-nav__list{padding-left:.6rem}[dir=rtl] .md-nav--secondary .md-nav__list{padding-right:.6rem}.md-nav--secondary 
.md-nav__list{padding-bottom:.4rem}[dir=ltr] .md-nav--secondary .md-nav__item>.md-nav__link{margin-right:.4rem}[dir=rtl] .md-nav--secondary .md-nav__item>.md-nav__link{margin-left:.4rem}}@media screen and (min-width:76.25em){.md-nav{margin-bottom:-.4rem;transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav--primary .md-nav__title{background:var(--md-default-bg-color);box-shadow:0 0 .4rem .4rem var(--md-default-bg-color);position:sticky;top:0;z-index:1}.md-nav--primary .md-nav__title[for=__drawer]{scroll-snap-align:start}.md-nav--primary .md-nav__title .md-nav__icon{display:none}[dir=ltr] .md-nav--primary .md-nav__list{padding-left:.6rem}[dir=rtl] .md-nav--primary .md-nav__list{padding-right:.6rem}.md-nav--primary .md-nav__list{padding-bottom:.4rem}[dir=ltr] .md-nav--primary .md-nav__item>.md-nav__link{margin-right:.4rem}[dir=rtl] .md-nav--primary .md-nav__item>.md-nav__link{margin-left:.4rem}.md-nav__toggle~.md-nav{display:grid;grid-template-rows:0fr;opacity:0;transition:grid-template-rows .25s cubic-bezier(.86,0,.07,1),opacity .25s,visibility 0ms .25s;visibility:collapse}.md-nav__toggle~.md-nav>.md-nav__list{overflow:hidden}.md-nav__toggle.md-toggle--indeterminate~.md-nav,.md-nav__toggle:checked~.md-nav{grid-template-rows:1fr;opacity:1;transition:grid-template-rows .25s cubic-bezier(.86,0,.07,1),opacity .15s .1s,visibility 0ms;visibility:visible}.md-nav__toggle.md-toggle--indeterminate~.md-nav{transition:none}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--section{display:block;margin:1.25em 0}.md-nav__item--section:last-child{margin-bottom:0}.md-nav__item--section>.md-nav__link{font-weight:700}.md-nav__item--section>.md-nav__link[for]{color:var(--md-default-fg-color--light)}.md-nav__item--section>.md-nav__link:not(.md-nav__container){pointer-events:none}.md-nav__item--section>.md-nav__link .md-icon,.md-nav__item--section>.md-nav__link>[for]{display:none}[dir=ltr] .md-nav__item--section>.md-nav{margin-left:-.6rem}[dir=rtl] .md-nav__item--section>.md-nav{margin-right:-.6rem}.md-nav__item--section>.md-nav{display:block;opacity:1;visibility:visible}.md-nav__item--section>.md-nav>.md-nav__list>.md-nav__item{padding:0}.md-nav__icon{border-radius:100%;height:.9rem;transition:background-color .25s;width:.9rem}.md-nav__icon:hover{background-color:var(--md-accent-fg-color--transparent)}.md-nav__icon:after{background-color:currentcolor;border-radius:100%;content:"";display:inline-block;height:100%;-webkit-mask-image:var(--md-nav-icon--next);mask-image:var(--md-nav-icon--next);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:transform .25s;vertical-align:-.1rem;width:100%}[dir=rtl] .md-nav__icon:after{transform:rotate(180deg)}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link .md-nav__icon:after,.md-nav__item--nested .md-toggle--indeterminate~.md-nav__link .md-nav__icon:after{transform:rotate(90deg)}.md-nav--lifted>.md-nav__list>.md-nav__item,.md-nav--lifted>.md-nav__title{display:none}.md-nav--lifted>.md-nav__list>.md-nav__item--active{display:block}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav__link{background:var(--md-default-bg-color);box-shadow:0 0 .4rem .4rem var(--md-default-bg-color);margin-top:0;position:sticky;top:0;z-index:1}.md-nav--lifted>.md-nav__list>.md-nav__item--active>.md-nav__link:not(.md-nav__container){pointer-events:none}.md-nav--lifted>.md-nav__list>.md-nav__item--active.md-nav__item--section{margin:0}[dir=ltr] 
.md-nav--lifted>.md-nav__list>.md-nav__item>.md-nav:not(.md-nav--secondary){margin-left:-.6rem}[dir=rtl] .md-nav--lifted>.md-nav__list>.md-nav__item>.md-nav:not(.md-nav--secondary){margin-right:-.6rem}.md-nav--lifted>.md-nav__list>.md-nav__item>[for]{color:var(--md-default-fg-color--light)}.md-nav--lifted .md-nav[data-md-level="1"]{grid-template-rows:1fr;opacity:1;visibility:visible}[dir=ltr] .md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary{border-left:.05rem solid var(--md-primary-fg-color)}[dir=rtl] .md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary{border-right:.05rem solid var(--md-primary-fg-color)}.md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary{display:block;margin-bottom:1.25em;opacity:1;visibility:visible}.md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary>.md-nav__list{overflow:visible;padding-bottom:0}.md-nav--integrated>.md-nav__list>.md-nav__item--active .md-nav--secondary>.md-nav__title{display:none}}.md-pagination{font-size:.8rem;font-weight:700;gap:.4rem}.md-pagination,.md-pagination>*{align-items:center;display:flex;justify-content:center}.md-pagination>*{border-radius:.2rem;height:1.8rem;min-width:1.8rem;text-align:center}.md-pagination__current{background-color:var(--md-default-fg-color--lightest);color:var(--md-default-fg-color--light)}.md-pagination__link{transition:color 125ms,background-color 125ms}.md-pagination__link:focus,.md-pagination__link:hover{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-pagination__link:focus svg,.md-pagination__link:hover svg{color:var(--md-accent-fg-color)}.md-pagination__link.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-pagination__link svg{fill:currentcolor;color:var(--md-default-fg-color--lighter);display:block;max-height:100%;width:1.2rem}:root{--md-path-icon:url('data:image/svg+xml;charset=utf-8,')}.md-path{font-size:.7rem;margin:0 .8rem;overflow:auto;padding-top:1.2rem}.md-path:not([hidden]){display:block}@media screen and (min-width:76.25em){.md-path{margin:0 1.2rem}}.md-path__list{align-items:center;display:flex;gap:.2rem;list-style:none;margin:0;padding:0}.md-path__item:not(:first-child){display:inline-flex;gap:.2rem;white-space:nowrap}.md-path__item:not(:first-child):before{background-color:var(--md-default-fg-color--lighter);content:"";display:inline;height:.8rem;-webkit-mask-image:var(--md-path-icon);mask-image:var(--md-path-icon);width:.8rem}.md-path__link{align-items:center;color:var(--md-default-fg-color--light);display:flex}.md-path__link:focus,.md-path__link:hover{color:var(--md-accent-fg-color)}:root{--md-post-pin-icon:url('data:image/svg+xml;charset=utf-8,')}.md-post__back{border-bottom:.05rem solid var(--md-default-fg-color--lightest);margin-bottom:1.2rem;padding-bottom:1.2rem}@media screen and (max-width:76.234375em){.md-post__back{display:none}}[dir=rtl] .md-post__back svg{transform:scaleX(-1)}.md-post__authors{display:flex;flex-direction:column;gap:.6rem;margin:0 .6rem 1.2rem}.md-post .md-post__meta a{transition:color 125ms}.md-post .md-post__meta a:focus,.md-post .md-post__meta a:hover{color:var(--md-accent-fg-color)}.md-post__title{color:var(--md-default-fg-color--light);font-weight:700}.md-post--excerpt{margin-bottom:3.2rem}.md-post--excerpt .md-post__header{align-items:center;display:flex;gap:.6rem;min-height:1.6rem}.md-post--excerpt 
.md-post__authors{align-items:center;display:inline-flex;flex-direction:row;gap:.2rem;margin:0;min-height:2.4rem}[dir=ltr] .md-post--excerpt .md-post__meta .md-meta__list{margin-right:.4rem}[dir=rtl] .md-post--excerpt .md-post__meta .md-meta__list{margin-left:.4rem}.md-post--excerpt .md-post__content>:first-child{--md-scroll-margin:6rem;margin-top:0}.md-post>.md-nav--secondary{margin:1em 0}.md-pin{background:var(--md-default-fg-color--lightest);border-radius:1rem;margin-top:-.05rem;padding:.2rem}.md-pin:after{background-color:currentcolor;content:"";display:block;height:.6rem;margin:0 auto;-webkit-mask-image:var(--md-post-pin-icon);mask-image:var(--md-post-pin-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:.6rem}.md-profile{align-items:center;display:flex;font-size:.7rem;gap:.6rem;line-height:1.4;width:100%}.md-profile__description{flex-grow:1}.md-content--post{display:flex}@media screen and (max-width:76.234375em){.md-content--post{flex-flow:column-reverse}}.md-content--post>.md-content__inner{min-width:0}@media screen and (min-width:76.25em){[dir=ltr] .md-content--post>.md-content__inner{margin-left:1.2rem}[dir=rtl] .md-content--post>.md-content__inner{margin-right:1.2rem}}@media screen and (max-width:76.234375em){.md-sidebar.md-sidebar--post{padding:0;position:static;width:100%}.md-sidebar.md-sidebar--post .md-sidebar__scrollwrap{overflow:visible}.md-sidebar.md-sidebar--post .md-sidebar__inner{padding:0}.md-sidebar.md-sidebar--post .md-post__meta{margin-left:.6rem;margin-right:.6rem}.md-sidebar.md-sidebar--post .md-nav__item{border:none;display:inline}.md-sidebar.md-sidebar--post .md-nav__list{display:inline-flex;flex-wrap:wrap;gap:.6rem;padding-bottom:.6rem;padding-top:.6rem}.md-sidebar.md-sidebar--post .md-nav__link{padding:0}.md-sidebar.md-sidebar--post .md-nav{height:auto;margin-bottom:0;position:static}}:root{--md-progress-value:0;--md-progress-delay:400ms}.md-progress{background:var(--md-primary-bg-color);height:.075rem;opacity:min(clamp(0,var(--md-progress-value),1),clamp(0,100 - var(--md-progress-value),1));position:fixed;top:0;transform:scaleX(calc(var(--md-progress-value)*1%));transform-origin:left;transition:transform .5s cubic-bezier(.19,1,.22,1),opacity .25s var(--md-progress-delay);width:100%;z-index:4}:root{--md-search-result-icon:url('data:image/svg+xml;charset=utf-8,')}.md-search{position:relative}@media screen and (min-width:60em){.md-search{padding:.2rem 0}}.no-js .md-search{display:none}.md-search__overlay{opacity:0;z-index:1}@media screen and (max-width:59.984375em){[dir=ltr] .md-search__overlay{left:-2.2rem}[dir=rtl] .md-search__overlay{right:-2.2rem}.md-search__overlay{background-color:var(--md-default-bg-color);border-radius:1rem;height:2rem;overflow:hidden;pointer-events:none;position:absolute;top:-1rem;transform-origin:center;transition:transform .3s .1s,opacity .2s .2s;width:2rem}[data-md-toggle=search]:checked~.md-header .md-search__overlay{opacity:1;transition:transform .4s,opacity .1s}}@media screen and (min-width:60em){[dir=ltr] .md-search__overlay{left:0}[dir=rtl] .md-search__overlay{right:0}.md-search__overlay{background-color:#0000008a;cursor:pointer;height:0;position:fixed;top:0;transition:width 0ms .25s,height 0ms .25s,opacity .25s;width:0}[data-md-toggle=search]:checked~.md-header .md-search__overlay{height:200vh;opacity:1;transition:width 0ms,height 0ms,opacity .25s;width:100%}}@media screen and 
(max-width:29.984375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(45)}}@media screen and (min-width:30em) and (max-width:44.984375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(60)}}@media screen and (min-width:45em) and (max-width:59.984375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{transform:scale(75)}}.md-search__inner{-webkit-backface-visibility:hidden;backface-visibility:hidden}@media screen and (max-width:59.984375em){[dir=ltr] .md-search__inner{left:0}[dir=rtl] .md-search__inner{right:0}.md-search__inner{height:0;opacity:0;overflow:hidden;position:fixed;top:0;transform:translateX(5%);transition:width 0ms .3s,height 0ms .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;width:0;z-index:2}[dir=rtl] .md-search__inner{transform:translateX(-5%)}[data-md-toggle=search]:checked~.md-header .md-search__inner{height:100%;opacity:1;transform:translateX(0);transition:width 0ms 0ms,height 0ms 0ms,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s;width:100%}}@media screen and (min-width:60em){[dir=ltr] .md-search__inner{float:right}[dir=rtl] .md-search__inner{float:left}.md-search__inner{padding:.1rem 0;position:relative;transition:width .25s cubic-bezier(.1,.7,.1,1);width:11.7rem}}@media screen and (min-width:60em) and (max-width:76.234375em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:23.4rem}}@media screen and (min-width:76.25em){[data-md-toggle=search]:checked~.md-header .md-search__inner{width:34.4rem}}.md-search__form{background-color:var(--md-default-bg-color);box-shadow:0 0 .6rem #0000;height:2.4rem;position:relative;transition:color .25s,background-color .25s;z-index:2}@media screen and (min-width:60em){.md-search__form{background-color:#00000042;border-radius:.1rem;height:1.8rem}.md-search__form:hover{background-color:#ffffff1f}}[data-md-toggle=search]:checked~.md-header .md-search__form{background-color:var(--md-default-bg-color);border-radius:.1rem .1rem 0 0;box-shadow:0 0 .6rem #00000012;color:var(--md-default-fg-color)}[dir=ltr] .md-search__input{padding-left:3.6rem;padding-right:2.2rem}[dir=rtl] .md-search__input{padding-left:2.2rem;padding-right:3.6rem}.md-search__input{background:#0000;font-size:.9rem;height:100%;position:relative;text-overflow:ellipsis;width:100%;z-index:2}.md-search__input::placeholder{transition:color .25s}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:var(--md-default-fg-color--light)}.md-search__input::-ms-clear{display:none}@media screen and (max-width:59.984375em){.md-search__input{font-size:.9rem;height:2.4rem;width:100%}}@media screen and (min-width:60em){[dir=ltr] .md-search__input{padding-left:2.2rem}[dir=rtl] .md-search__input{padding-right:2.2rem}.md-search__input{color:inherit;font-size:.8rem}.md-search__input::placeholder{color:var(--md-primary-bg-color--light)}.md-search__input+.md-search__icon{color:var(--md-primary-bg-color)}[data-md-toggle=search]:checked~.md-header .md-search__input{text-overflow:clip}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon{color:var(--md-default-fg-color--light)}[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:#0000}}.md-search__icon{cursor:pointer;display:inline-block;height:1.2rem;transition:color .25s,opacity .25s;width:1.2rem}.md-search__icon:hover{opacity:.7}[dir=ltr] .md-search__icon[for=__search]{left:.5rem}[dir=rtl] 
.md-search__icon[for=__search]{right:.5rem}.md-search__icon[for=__search]{position:absolute;top:.3rem;z-index:2}[dir=rtl] .md-search__icon[for=__search] svg{transform:scaleX(-1)}@media screen and (max-width:59.984375em){[dir=ltr] .md-search__icon[for=__search]{left:.8rem}[dir=rtl] .md-search__icon[for=__search]{right:.8rem}.md-search__icon[for=__search]{top:.6rem}.md-search__icon[for=__search] svg:first-child{display:none}}@media screen and (min-width:60em){.md-search__icon[for=__search]{pointer-events:none}.md-search__icon[for=__search] svg:last-child{display:none}}[dir=ltr] .md-search__options{right:.5rem}[dir=rtl] .md-search__options{left:.5rem}.md-search__options{pointer-events:none;position:absolute;top:.3rem;z-index:2}@media screen and (max-width:59.984375em){[dir=ltr] .md-search__options{right:.8rem}[dir=rtl] .md-search__options{left:.8rem}.md-search__options{top:.6rem}}[dir=ltr] .md-search__options>.md-icon{margin-left:.2rem}[dir=rtl] .md-search__options>.md-icon{margin-right:.2rem}.md-search__options>.md-icon{color:var(--md-default-fg-color--light);opacity:0;transform:scale(.75);transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s}.md-search__options>.md-icon:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__options>.md-icon{opacity:1;pointer-events:auto;transform:scale(1)}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__options>.md-icon:hover{opacity:.7}[dir=ltr] .md-search__suggest{padding-left:3.6rem;padding-right:2.2rem}[dir=rtl] .md-search__suggest{padding-left:2.2rem;padding-right:3.6rem}.md-search__suggest{align-items:center;color:var(--md-default-fg-color--lighter);display:flex;font-size:.9rem;height:100%;opacity:0;position:absolute;top:0;transition:opacity 50ms;white-space:nowrap;width:100%}@media screen and (min-width:60em){[dir=ltr] .md-search__suggest{padding-left:2.2rem}[dir=rtl] .md-search__suggest{padding-right:2.2rem}.md-search__suggest{font-size:.8rem}}[data-md-toggle=search]:checked~.md-header .md-search__suggest{opacity:1;transition:opacity .3s .1s}[dir=ltr] .md-search__output{border-bottom-left-radius:.1rem}[dir=ltr] .md-search__output,[dir=rtl] .md-search__output{border-bottom-right-radius:.1rem}[dir=rtl] .md-search__output{border-bottom-left-radius:.1rem}.md-search__output{overflow:hidden;position:absolute;width:100%;z-index:1}@media screen and (max-width:59.984375em){.md-search__output{bottom:0;top:2.4rem}}@media screen and (min-width:60em){.md-search__output{opacity:0;top:1.9rem;transition:opacity .4s}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:var(--md-shadow-z3);opacity:1}}.md-search__scrollwrap{-webkit-backface-visibility:hidden;backface-visibility:hidden;background-color:var(--md-default-bg-color);height:100%;overflow-y:auto;touch-action:pan-y}@media (-webkit-max-device-pixel-ratio:1),(max-resolution:1dppx){.md-search__scrollwrap{transform:translateZ(0)}}@media screen and (min-width:60em) and (max-width:76.234375em){.md-search__scrollwrap{width:23.4rem}}@media screen and (min-width:76.25em){.md-search__scrollwrap{width:34.4rem}}@media screen and (min-width:60em){.md-search__scrollwrap{max-height:0;scrollbar-color:var(--md-default-fg-color--lighter) #0000;scrollbar-width:thin}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) 
#0000}.md-search__scrollwrap::-webkit-scrollbar{height:.2rem;width:.2rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}}.md-search-result{color:var(--md-default-fg-color);word-break:break-word}.md-search-result__meta{background-color:var(--md-default-fg-color--lightest);color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.8rem;padding:0 .8rem;scroll-snap-align:start}@media screen and (min-width:60em){[dir=ltr] .md-search-result__meta{padding-left:2.2rem}[dir=rtl] .md-search-result__meta{padding-right:2.2rem}}.md-search-result__list{list-style:none;margin:0;padding:0;-webkit-user-select:none;user-select:none}.md-search-result__item{box-shadow:0 -.05rem var(--md-default-fg-color--lightest)}.md-search-result__item:first-child{box-shadow:none}.md-search-result__link{display:block;outline:none;scroll-snap-align:start;transition:background-color .25s}.md-search-result__link:focus,.md-search-result__link:hover{background-color:var(--md-accent-fg-color--transparent)}.md-search-result__link:last-child p:last-child{margin-bottom:.6rem}.md-search-result__more>summary{cursor:pointer;display:block;outline:none;position:sticky;scroll-snap-align:start;top:0;z-index:1}.md-search-result__more>summary::marker{display:none}.md-search-result__more>summary::-webkit-details-marker{display:none}.md-search-result__more>summary>div{color:var(--md-typeset-a-color);font-size:.64rem;padding:.75em .8rem;transition:color .25s,background-color .25s}@media screen and (min-width:60em){[dir=ltr] .md-search-result__more>summary>div{padding-left:2.2rem}[dir=rtl] .md-search-result__more>summary>div{padding-right:2.2rem}}.md-search-result__more>summary:focus>div,.md-search-result__more>summary:hover>div{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-search-result__more[open]>summary{background-color:var(--md-default-bg-color)}.md-search-result__article{overflow:hidden;padding:0 .8rem;position:relative}@media screen and (min-width:60em){[dir=ltr] .md-search-result__article{padding-left:2.2rem}[dir=rtl] .md-search-result__article{padding-right:2.2rem}}[dir=ltr] .md-search-result__icon{left:0}[dir=rtl] .md-search-result__icon{right:0}.md-search-result__icon{color:var(--md-default-fg-color--light);height:1.2rem;margin:.5rem;position:absolute;width:1.2rem}@media screen and (max-width:59.984375em){.md-search-result__icon{display:none}}.md-search-result__icon:after{background-color:currentcolor;content:"";display:inline-block;height:100%;-webkit-mask-image:var(--md-search-result-icon);mask-image:var(--md-search-result-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:100%}[dir=rtl] .md-search-result__icon:after{transform:scaleX(-1)}.md-search-result .md-typeset{color:var(--md-default-fg-color--light);font-size:.64rem;line-height:1.6}.md-search-result .md-typeset h1{color:var(--md-default-fg-color);font-size:.8rem;font-weight:400;line-height:1.4;margin:.55rem 0}.md-search-result .md-typeset h1 mark{text-decoration:none}.md-search-result .md-typeset h2{color:var(--md-default-fg-color);font-size:.64rem;font-weight:700;line-height:1.6;margin:.5em 0}.md-search-result .md-typeset h2 
mark{text-decoration:none}.md-search-result__terms{color:var(--md-default-fg-color);display:block;font-size:.64rem;font-style:italic;margin:.5em 0}.md-search-result mark{background-color:initial;color:var(--md-accent-fg-color);text-decoration:underline}.md-select{position:relative;z-index:1}.md-select__inner{background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:var(--md-shadow-z2);color:var(--md-default-fg-color);left:50%;margin-top:.2rem;max-height:0;opacity:0;position:absolute;top:calc(100% - .2rem);transform:translate3d(-50%,.3rem,0);transition:transform .25s 375ms,opacity .25s .25s,max-height 0ms .5s}.md-select:focus-within .md-select__inner,.md-select:hover .md-select__inner{max-height:10rem;opacity:1;transform:translate3d(-50%,0,0);transition:transform .25s cubic-bezier(.1,.7,.1,1),opacity .25s,max-height 0ms}.md-select__inner:after{border-bottom:.2rem solid #0000;border-bottom-color:var(--md-default-bg-color);border-left:.2rem solid #0000;border-right:.2rem solid #0000;border-top:0;content:"";height:0;left:50%;margin-left:-.2rem;margin-top:-.2rem;position:absolute;top:0;width:0}.md-select__list{border-radius:.1rem;font-size:.8rem;list-style-type:none;margin:0;max-height:inherit;overflow:auto;padding:0}.md-select__item{line-height:1.8rem}[dir=ltr] .md-select__link{padding-left:.6rem;padding-right:1.2rem}[dir=rtl] .md-select__link{padding-left:1.2rem;padding-right:.6rem}.md-select__link{cursor:pointer;display:block;outline:none;scroll-snap-align:start;transition:background-color .25s,color .25s;width:100%}.md-select__link:focus,.md-select__link:hover{color:var(--md-accent-fg-color)}.md-select__link:focus{background-color:var(--md-default-fg-color--lightest)}.md-sidebar{align-self:flex-start;flex-shrink:0;padding:1.2rem 0;position:sticky;top:2.4rem;width:12.1rem}@media print{.md-sidebar{display:none}}@media screen and (max-width:76.234375em){[dir=ltr] .md-sidebar--primary{left:-12.1rem}[dir=rtl] .md-sidebar--primary{right:-12.1rem}.md-sidebar--primary{background-color:var(--md-default-bg-color);display:block;height:100%;position:fixed;top:0;transform:translateX(0);transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s;width:12.1rem;z-index:5}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:var(--md-shadow-z3);transform:translateX(12.1rem)}[dir=rtl] [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{transform:translateX(-12.1rem)}.md-sidebar--primary .md-sidebar__scrollwrap{bottom:0;left:0;margin:0;overflow:hidden;position:absolute;right:0;scroll-snap-type:none;top:0}}@media screen and (min-width:76.25em){.md-sidebar{height:0}.no-js .md-sidebar{height:auto}.md-header--lifted~.md-container .md-sidebar{top:4.8rem}}.md-sidebar--secondary{display:none;order:2}@media screen and (min-width:60em){.md-sidebar--secondary{height:0}.no-js .md-sidebar--secondary{height:auto}.md-sidebar--secondary:not([hidden]){display:block}.md-sidebar--secondary .md-sidebar__scrollwrap{touch-action:pan-y}}.md-sidebar__scrollwrap{scrollbar-gutter:stable;-webkit-backface-visibility:hidden;backface-visibility:hidden;margin:0 .2rem;overflow-y:auto;scrollbar-color:var(--md-default-fg-color--lighter) #0000;scrollbar-width:thin}.md-sidebar__scrollwrap::-webkit-scrollbar{height:.2rem;width:.2rem}.md-sidebar__scrollwrap:focus-within,.md-sidebar__scrollwrap:hover{scrollbar-color:var(--md-accent-fg-color) 
#0000}.md-sidebar__scrollwrap:focus-within::-webkit-scrollbar-thumb,.md-sidebar__scrollwrap:hover::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-sidebar__scrollwrap:focus-within::-webkit-scrollbar-thumb:hover,.md-sidebar__scrollwrap:hover::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}@supports selector(::-webkit-scrollbar){.md-sidebar__scrollwrap{scrollbar-gutter:auto}[dir=ltr] .md-sidebar__inner{padding-right:calc(100% - 11.5rem)}[dir=rtl] .md-sidebar__inner{padding-left:calc(100% - 11.5rem)}}@media screen and (max-width:76.234375em){.md-overlay{background-color:#0000008a;height:0;opacity:0;position:fixed;top:0;transition:width 0ms .25s,height 0ms .25s,opacity .25s;width:0;z-index:5}[data-md-toggle=drawer]:checked~.md-overlay{height:100%;opacity:1;transition:width 0ms,height 0ms,opacity .25s;width:100%}}@keyframes facts{0%{height:0}to{height:.65rem}}@keyframes fact{0%{opacity:0;transform:translateY(100%)}50%{opacity:0}to{opacity:1;transform:translateY(0)}}:root{--md-source-forks-icon:url('data:image/svg+xml;charset=utf-8,');--md-source-repositories-icon:url('data:image/svg+xml;charset=utf-8,');--md-source-stars-icon:url('data:image/svg+xml;charset=utf-8,');--md-source-version-icon:url('data:image/svg+xml;charset=utf-8,')}.md-source{-webkit-backface-visibility:hidden;backface-visibility:hidden;display:block;font-size:.65rem;line-height:1.2;outline-color:var(--md-accent-fg-color);transition:opacity .25s;white-space:nowrap}.md-source:hover{opacity:.7}.md-source__icon{display:inline-block;height:2.4rem;vertical-align:middle;width:2rem}[dir=ltr] .md-source__icon svg{margin-left:.6rem}[dir=rtl] .md-source__icon svg{margin-right:.6rem}.md-source__icon svg{margin-top:.6rem}[dir=ltr] .md-source__icon+.md-source__repository{padding-left:2rem}[dir=rtl] .md-source__icon+.md-source__repository{padding-right:2rem}[dir=ltr] .md-source__icon+.md-source__repository{margin-left:-2rem}[dir=rtl] .md-source__icon+.md-source__repository{margin-right:-2rem}[dir=ltr] .md-source__repository{margin-left:.6rem}[dir=rtl] .md-source__repository{margin-right:.6rem}.md-source__repository{display:inline-block;max-width:calc(100% - 1.2rem);overflow:hidden;text-overflow:ellipsis;vertical-align:middle}.md-source__facts{display:flex;font-size:.55rem;gap:.4rem;list-style-type:none;margin:.1rem 0 0;opacity:.75;overflow:hidden;padding:0;width:100%}.md-source__repository--active .md-source__facts{animation:facts .25s ease-in}.md-source__fact{overflow:hidden;text-overflow:ellipsis}.md-source__repository--active .md-source__fact{animation:fact .4s ease-out}[dir=ltr] .md-source__fact:before{margin-right:.1rem}[dir=rtl] 
.md-source__fact:before{margin-left:.1rem}.md-source__fact:before{background-color:currentcolor;content:"";display:inline-block;height:.6rem;-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;vertical-align:text-top;width:.6rem}.md-source__fact:nth-child(1n+2){flex-shrink:0}.md-source__fact--version:before{-webkit-mask-image:var(--md-source-version-icon);mask-image:var(--md-source-version-icon)}.md-source__fact--stars:before{-webkit-mask-image:var(--md-source-stars-icon);mask-image:var(--md-source-stars-icon)}.md-source__fact--forks:before{-webkit-mask-image:var(--md-source-forks-icon);mask-image:var(--md-source-forks-icon)}.md-source__fact--repositories:before{-webkit-mask-image:var(--md-source-repositories-icon);mask-image:var(--md-source-repositories-icon)}.md-source-file{margin:1em 0}[dir=ltr] .md-source-file__fact{margin-right:.6rem}[dir=rtl] .md-source-file__fact{margin-left:.6rem}.md-source-file__fact{align-items:center;color:var(--md-default-fg-color--light);display:inline-flex;font-size:.68rem;gap:.3rem}.md-source-file__fact .md-icon{flex-shrink:0;margin-bottom:.05rem}[dir=ltr] .md-source-file__fact .md-author{float:left}[dir=rtl] .md-source-file__fact .md-author{float:right}.md-source-file__fact .md-author{margin-right:.2rem}.md-source-file__fact svg{width:.9rem}:root{--md-status:url('data:image/svg+xml;charset=utf-8,');--md-status--new:url('data:image/svg+xml;charset=utf-8,');--md-status--deprecated:url('data:image/svg+xml;charset=utf-8,');--md-status--encrypted:url('data:image/svg+xml;charset=utf-8,')}.md-status:after{background-color:var(--md-default-fg-color--light);content:"";display:inline-block;height:1.125em;-webkit-mask-image:var(--md-status);mask-image:var(--md-status);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;vertical-align:text-bottom;width:1.125em}.md-status:hover:after{background-color:currentcolor}.md-status--new:after{-webkit-mask-image:var(--md-status--new);mask-image:var(--md-status--new)}.md-status--deprecated:after{-webkit-mask-image:var(--md-status--deprecated);mask-image:var(--md-status--deprecated)}.md-status--encrypted:after{-webkit-mask-image:var(--md-status--encrypted);mask-image:var(--md-status--encrypted)}.md-tabs{background-color:var(--md-primary-fg-color);color:var(--md-primary-bg-color);display:block;line-height:1.3;overflow:auto;width:100%;z-index:3}@media print{.md-tabs{display:none}}@media screen and (max-width:76.234375em){.md-tabs{display:none}}.md-tabs[hidden]{pointer-events:none}[dir=ltr] .md-tabs__list{margin-left:.2rem}[dir=rtl] .md-tabs__list{margin-right:.2rem}.md-tabs__list{contain:content;display:flex;list-style:none;margin:0;overflow:auto;padding:0;scrollbar-width:none;white-space:nowrap}.md-tabs__list::-webkit-scrollbar{display:none}.md-tabs__item{height:2.4rem;padding-left:.6rem;padding-right:.6rem}.md-tabs__item--active .md-tabs__link{color:inherit;opacity:1}.md-tabs__link{-webkit-backface-visibility:hidden;backface-visibility:hidden;display:flex;font-size:.7rem;margin-top:.8rem;opacity:.7;outline-color:var(--md-accent-fg-color);outline-offset:.2rem;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s}.md-tabs__link:focus,.md-tabs__link:hover{color:inherit;opacity:1}[dir=ltr] .md-tabs__link svg{margin-right:.4rem}[dir=rtl] .md-tabs__link svg{margin-left:.4rem}.md-tabs__link 
svg{fill:currentcolor;height:1.3em}.md-tabs__item:nth-child(2) .md-tabs__link{transition-delay:20ms}.md-tabs__item:nth-child(3) .md-tabs__link{transition-delay:40ms}.md-tabs__item:nth-child(4) .md-tabs__link{transition-delay:60ms}.md-tabs__item:nth-child(5) .md-tabs__link{transition-delay:80ms}.md-tabs__item:nth-child(6) .md-tabs__link{transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{transition-delay:.12s}.md-tabs__item:nth-child(8) .md-tabs__link{transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{transition-delay:.3s}.md-tabs[hidden] .md-tabs__link{opacity:0;transform:translateY(50%);transition:transform 0ms .1s,opacity .1s}:root{--md-tag-icon:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .md-tags:not([hidden]){display:inline-flex;flex-wrap:wrap;gap:.5em;margin-bottom:.75em;margin-top:-.125em}.md-typeset .md-tag{align-items:center;background:var(--md-default-fg-color--lightest);border-radius:2.4rem;display:inline-flex;font-size:.64rem;font-size:min(.8em,.64rem);font-weight:700;gap:.5em;letter-spacing:normal;line-height:1.6;padding:.3125em .78125em}.md-typeset .md-tag[href]{-webkit-tap-highlight-color:transparent;color:inherit;outline:none;transition:color 125ms,background-color 125ms}.md-typeset .md-tag[href]:focus,.md-typeset .md-tag[href]:hover{background-color:var(--md-accent-fg-color);color:var(--md-accent-bg-color)}[id]>.md-typeset .md-tag{vertical-align:text-top}.md-typeset .md-tag-shadow{opacity:.5}.md-typeset .md-tag-icon:before{background-color:var(--md-default-fg-color--lighter);content:"";display:inline-block;height:1.2em;-webkit-mask-image:var(--md-tag-icon);mask-image:var(--md-tag-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:background-color 125ms;vertical-align:text-bottom;width:1.2em}.md-typeset .md-tag-icon[href]:focus:before,.md-typeset .md-tag-icon[href]:hover:before{background-color:var(--md-accent-bg-color)}@keyframes pulse{0%{transform:scale(.95)}75%{transform:scale(1)}to{transform:scale(.95)}}:root{--md-annotation-bg-icon:url('data:image/svg+xml;charset=utf-8,');--md-annotation-icon:url('data:image/svg+xml;charset=utf-8,')}.md-tooltip{-webkit-backface-visibility:hidden;backface-visibility:hidden;background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:var(--md-shadow-z2);color:var(--md-default-fg-color);font-family:var(--md-text-font-family);left:clamp(var(--md-tooltip-0,0rem) + .8rem,var(--md-tooltip-x),100vw + var(--md-tooltip-0,0rem) + .8rem - var(--md-tooltip-width) - 2 * .8rem);max-width:calc(100vw - 1.6rem);opacity:0;position:absolute;top:var(--md-tooltip-y);transform:translateY(-.4rem);transition:transform 0ms .25s,opacity .25s,z-index .25s;width:var(--md-tooltip-width);z-index:0}.md-tooltip--active{opacity:1;transform:translateY(0);transition:transform .25s cubic-bezier(.1,.7,.1,1),opacity .25s,z-index 
0ms;z-index:2}.md-tooltip--inline{font-weight:700;-webkit-user-select:none;user-select:none;width:auto}.md-tooltip--inline:not(.md-tooltip--active){transform:translateY(.2rem) scale(.9)}.md-tooltip--inline .md-tooltip__inner{font-size:.5rem;padding:.2rem .4rem}[hidden]+.md-tooltip--inline{display:none}.focus-visible>.md-tooltip,.md-tooltip:target{outline:var(--md-accent-fg-color) auto}.md-tooltip__inner{font-size:.64rem;padding:.8rem}.md-tooltip__inner.md-typeset>:first-child{margin-top:0}.md-tooltip__inner.md-typeset>:last-child{margin-bottom:0}.md-annotation{font-weight:400;outline:none;vertical-align:text-bottom;white-space:normal}[dir=rtl] .md-annotation{direction:rtl}code .md-annotation{font-family:var(--md-code-font-family);font-size:inherit}.md-annotation:not([hidden]){display:inline-block;line-height:1.25}.md-annotation__index{border-radius:.01px;cursor:pointer;display:inline-block;margin-left:.4ch;margin-right:.4ch;outline:none;overflow:hidden;position:relative;-webkit-user-select:none;user-select:none;vertical-align:text-top;z-index:0}.md-annotation .md-annotation__index{transition:z-index .25s}@media screen{.md-annotation__index{width:2.2ch}[data-md-visible]>.md-annotation__index{animation:pulse 2s infinite}.md-annotation__index:before{background:var(--md-default-bg-color);-webkit-mask-image:var(--md-annotation-bg-icon);mask-image:var(--md-annotation-bg-icon)}.md-annotation__index:after,.md-annotation__index:before{content:"";height:2.2ch;-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;top:-.1ch;width:2.2ch;z-index:-1}.md-annotation__index:after{background-color:var(--md-default-fg-color--lighter);-webkit-mask-image:var(--md-annotation-icon);mask-image:var(--md-annotation-icon);transform:scale(1.0001);transition:background-color .25s,transform .25s}.md-tooltip--active+.md-annotation__index:after{transform:rotate(45deg)}.md-tooltip--active+.md-annotation__index:after,:hover>.md-annotation__index:after{background-color:var(--md-accent-fg-color)}}.md-tooltip--active+.md-annotation__index{animation-play-state:paused;transition-duration:0ms;z-index:2}.md-annotation__index [data-md-annotation-id]{display:inline-block}@media print{.md-annotation__index [data-md-annotation-id]{background:var(--md-default-fg-color--lighter);border-radius:2ch;color:var(--md-default-bg-color);font-weight:700;padding:0 .6ch;white-space:nowrap}.md-annotation__index [data-md-annotation-id]:after{content:attr(data-md-annotation-id)}}.md-typeset .md-annotation-list{counter-reset:xxx;list-style:none}.md-typeset .md-annotation-list li{position:relative}[dir=ltr] .md-typeset .md-annotation-list li:before{left:-2.125em}[dir=rtl] .md-typeset .md-annotation-list li:before{right:-2.125em}.md-typeset .md-annotation-list li:before{background:var(--md-default-fg-color--lighter);border-radius:2ch;color:var(--md-default-bg-color);content:counter(xxx);counter-increment:xxx;font-size:.8875em;font-weight:700;height:2ch;line-height:1.25;min-width:2ch;padding:0 .6ch;position:absolute;text-align:center;top:.25em}:root{--md-tooltip-width:20rem;--md-tooltip-tail:0.3rem}.md-tooltip2{-webkit-backface-visibility:hidden;backface-visibility:hidden;color:var(--md-default-fg-color);font-family:var(--md-text-font-family);opacity:0;pointer-events:none;position:absolute;top:calc(var(--md-tooltip-host-y) + var(--md-tooltip-y));transform:translateY(-.4rem);transform-origin:calc(var(--md-tooltip-host-x) + var(--md-tooltip-x)) 
0;transition:transform 0ms .25s,opacity .25s,z-index .25s;width:100%;z-index:0}.md-tooltip2:before{border-left:var(--md-tooltip-tail) solid #0000;border-right:var(--md-tooltip-tail) solid #0000;content:"";display:block;left:clamp(1.5 * .8rem,var(--md-tooltip-host-x) + var(--md-tooltip-x) - var(--md-tooltip-tail),100vw - 2 * var(--md-tooltip-tail) - 1.5 * .8rem);position:absolute;z-index:1}.md-tooltip2--top:before{border-top:var(--md-tooltip-tail) solid var(--md-default-bg-color);bottom:calc(var(--md-tooltip-tail)*-1 + .025rem);filter:drop-shadow(0 1px 0 hsla(0,0%,0%,.05))}.md-tooltip2--bottom:before{border-bottom:var(--md-tooltip-tail) solid var(--md-default-bg-color);filter:drop-shadow(0 -1px 0 hsla(0,0%,0%,.05));top:calc(var(--md-tooltip-tail)*-1 + .025rem)}.md-tooltip2--active{opacity:1;transform:translateY(0);transition:transform .4s cubic-bezier(0,1,.5,1),opacity .25s,z-index 0ms;z-index:2}.md-tooltip2__inner{scrollbar-gutter:stable;background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:var(--md-shadow-z2);left:clamp(.8rem,var(--md-tooltip-host-x) - .8rem,100vw - var(--md-tooltip-width) - .8rem);max-height:40vh;max-width:calc(100vw - 1.6rem);position:relative;scrollbar-width:thin}.md-tooltip2__inner::-webkit-scrollbar{height:.2rem;width:.2rem}.md-tooltip2__inner::-webkit-scrollbar-thumb{background-color:var(--md-default-fg-color--lighter)}.md-tooltip2__inner::-webkit-scrollbar-thumb:hover{background-color:var(--md-accent-fg-color)}[role=dialog]>.md-tooltip2__inner{font-size:.64rem;overflow:auto;padding:0 .8rem;pointer-events:auto;width:var(--md-tooltip-width)}[role=dialog]>.md-tooltip2__inner:after,[role=dialog]>.md-tooltip2__inner:before{content:"";display:block;height:.8rem;position:sticky;width:100%;z-index:10}[role=dialog]>.md-tooltip2__inner:before{background:linear-gradient(var(--md-default-bg-color),#0000 75%);top:0}[role=dialog]>.md-tooltip2__inner:after{background:linear-gradient(#0000,var(--md-default-bg-color) 75%);bottom:0}[role=tooltip]>.md-tooltip2__inner{font-size:.5rem;font-weight:700;left:clamp(.8rem,var(--md-tooltip-host-x) + var(--md-tooltip-x) - var(--md-tooltip-width)/2,100vw - var(--md-tooltip-width) - .8rem);max-width:min(100vw - 2 * .8rem,400px);padding:.2rem .4rem;-webkit-user-select:none;user-select:none;width:-moz-fit-content;width:fit-content}.md-tooltip2__inner.md-typeset>:first-child{margin-top:0}.md-tooltip2__inner.md-typeset>:last-child{margin-bottom:0}[dir=ltr] .md-top{margin-left:50%}[dir=rtl] .md-top{margin-right:50%}.md-top{background-color:var(--md-default-bg-color);border-radius:1.6rem;box-shadow:var(--md-shadow-z2);color:var(--md-default-fg-color--light);cursor:pointer;display:block;font-size:.7rem;outline:none;padding:.4rem .8rem;position:fixed;top:3.2rem;transform:translate(-50%);transition:color 125ms,background-color 125ms,transform 125ms cubic-bezier(.4,0,.2,1),opacity 125ms;z-index:2}@media print{.md-top{display:none}}[dir=rtl] .md-top{transform:translate(50%)}.md-top[hidden]{opacity:0;pointer-events:none;transform:translate(-50%,.2rem);transition-duration:0ms}[dir=rtl] .md-top[hidden]{transform:translate(50%,.2rem)}.md-top:focus,.md-top:hover{background-color:var(--md-accent-fg-color);color:var(--md-accent-bg-color)}.md-top svg{display:inline-block;vertical-align:-.5em}@keyframes hoverfix{0%{pointer-events:none}}:root{--md-version-icon:url('data:image/svg+xml;charset=utf-8,')}.md-version{flex-shrink:0;font-size:.8rem;height:2.4rem}[dir=ltr] .md-version__current{margin-left:1.4rem;margin-right:.4rem}[dir=rtl] 
.md-version__current{margin-left:.4rem;margin-right:1.4rem}.md-version__current{color:inherit;cursor:pointer;outline:none;position:relative;top:.05rem}[dir=ltr] .md-version__current:after{margin-left:.4rem}[dir=rtl] .md-version__current:after{margin-right:.4rem}.md-version__current:after{background-color:currentcolor;content:"";display:inline-block;height:.6rem;-webkit-mask-image:var(--md-version-icon);mask-image:var(--md-version-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:.4rem}.md-version__alias{margin-left:.3rem;opacity:.7}.md-version__list{background-color:var(--md-default-bg-color);border-radius:.1rem;box-shadow:var(--md-shadow-z2);color:var(--md-default-fg-color);list-style-type:none;margin:.2rem .8rem;max-height:0;opacity:0;overflow:auto;padding:0;position:absolute;scroll-snap-type:y mandatory;top:.15rem;transition:max-height 0ms .5s,opacity .25s .25s;z-index:3}.md-version:focus-within .md-version__list,.md-version:hover .md-version__list{max-height:10rem;opacity:1;transition:max-height 0ms,opacity .25s}@media (hover:none),(pointer:coarse){.md-version:hover .md-version__list{animation:hoverfix .25s forwards}.md-version:focus-within .md-version__list{animation:none}}.md-version__item{line-height:1.8rem}[dir=ltr] .md-version__link{padding-left:.6rem;padding-right:1.2rem}[dir=rtl] .md-version__link{padding-left:1.2rem;padding-right:.6rem}.md-version__link{cursor:pointer;display:block;outline:none;scroll-snap-align:start;transition:color .25s,background-color .25s;white-space:nowrap;width:100%}.md-version__link:focus,.md-version__link:hover{color:var(--md-accent-fg-color)}.md-version__link:focus{background-color:var(--md-default-fg-color--lightest)}:root{--md-admonition-icon--note:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--abstract:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--info:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--tip:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--success:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--question:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--warning:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--failure:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--danger:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--bug:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--example:url('data:image/svg+xml;charset=utf-8,');--md-admonition-icon--quote:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .admonition,.md-typeset details{background-color:var(--md-admonition-bg-color);border:.075rem solid #448aff;border-radius:.2rem;box-shadow:var(--md-shadow-z1);color:var(--md-admonition-fg-color);display:flow-root;font-size:.64rem;margin:1.5625em 0;padding:0 .6rem;page-break-inside:avoid;transition:box-shadow 125ms}@media print{.md-typeset .admonition,.md-typeset details{box-shadow:none}}.md-typeset .admonition:focus-within,.md-typeset details:focus-within{box-shadow:0 0 0 .2rem #448aff1a}.md-typeset .admonition>*,.md-typeset details>*{box-sizing:border-box}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin-bottom:1em;margin-top:1em}.md-typeset .admonition .md-typeset__scrollwrap,.md-typeset details .md-typeset__scrollwrap{margin:1em -.6rem}.md-typeset .admonition .md-typeset__table,.md-typeset details 
.md-typeset__table{padding:0 .6rem}.md-typeset .admonition>.tabbed-set:only-child,.md-typeset details>.tabbed-set:only-child{margin-top:0}html .md-typeset .admonition>:last-child,html .md-typeset details>:last-child{margin-bottom:.6rem}[dir=ltr] .md-typeset .admonition-title,[dir=ltr] .md-typeset summary{padding-left:2rem;padding-right:.6rem}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{padding-left:.6rem;padding-right:2rem}[dir=ltr] .md-typeset .admonition-title,[dir=ltr] .md-typeset summary{border-left-width:.2rem}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{border-right-width:.2rem}[dir=ltr] .md-typeset .admonition-title,[dir=ltr] .md-typeset summary{border-top-left-radius:.1rem}[dir=ltr] .md-typeset .admonition-title,[dir=ltr] .md-typeset summary,[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{border-top-right-radius:.1rem}[dir=rtl] .md-typeset .admonition-title,[dir=rtl] .md-typeset summary{border-top-left-radius:.1rem}.md-typeset .admonition-title,.md-typeset summary{background-color:#448aff1a;border:none;font-weight:700;margin:0 -.6rem;padding-bottom:.4rem;padding-top:.4rem;position:relative}html .md-typeset .admonition-title:last-child,html .md-typeset summary:last-child{margin-bottom:0}[dir=ltr] .md-typeset .admonition-title:before,[dir=ltr] .md-typeset summary:before{left:.6rem}[dir=rtl] .md-typeset .admonition-title:before,[dir=rtl] .md-typeset summary:before{right:.6rem}.md-typeset .admonition-title:before,.md-typeset summary:before{background-color:#448aff;content:"";height:1rem;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;top:.625em;width:1rem}.md-typeset .admonition-title code,.md-typeset summary code{box-shadow:0 0 0 .05rem var(--md-default-fg-color--lightest)}.md-typeset .admonition.note,.md-typeset details.note{border-color:#448aff}.md-typeset .admonition.note:focus-within,.md-typeset details.note:focus-within{box-shadow:0 0 0 .2rem #448aff1a}.md-typeset .note>.admonition-title,.md-typeset .note>summary{background-color:#448aff1a}.md-typeset .note>.admonition-title:before,.md-typeset .note>summary:before{background-color:#448aff;-webkit-mask-image:var(--md-admonition-icon--note);mask-image:var(--md-admonition-icon--note)}.md-typeset .note>.admonition-title:after,.md-typeset .note>summary:after{color:#448aff}.md-typeset .admonition.abstract,.md-typeset details.abstract{border-color:#00b0ff}.md-typeset .admonition.abstract:focus-within,.md-typeset details.abstract:focus-within{box-shadow:0 0 0 .2rem #00b0ff1a}.md-typeset .abstract>.admonition-title,.md-typeset .abstract>summary{background-color:#00b0ff1a}.md-typeset .abstract>.admonition-title:before,.md-typeset .abstract>summary:before{background-color:#00b0ff;-webkit-mask-image:var(--md-admonition-icon--abstract);mask-image:var(--md-admonition-icon--abstract)}.md-typeset .abstract>.admonition-title:after,.md-typeset .abstract>summary:after{color:#00b0ff}.md-typeset .admonition.info,.md-typeset details.info{border-color:#00b8d4}.md-typeset .admonition.info:focus-within,.md-typeset details.info:focus-within{box-shadow:0 0 0 .2rem #00b8d41a}.md-typeset .info>.admonition-title,.md-typeset .info>summary{background-color:#00b8d41a}.md-typeset .info>.admonition-title:before,.md-typeset 
.info>summary:before{background-color:#00b8d4;-webkit-mask-image:var(--md-admonition-icon--info);mask-image:var(--md-admonition-icon--info)}.md-typeset .info>.admonition-title:after,.md-typeset .info>summary:after{color:#00b8d4}.md-typeset .admonition.tip,.md-typeset details.tip{border-color:#00bfa5}.md-typeset .admonition.tip:focus-within,.md-typeset details.tip:focus-within{box-shadow:0 0 0 .2rem #00bfa51a}.md-typeset .tip>.admonition-title,.md-typeset .tip>summary{background-color:#00bfa51a}.md-typeset .tip>.admonition-title:before,.md-typeset .tip>summary:before{background-color:#00bfa5;-webkit-mask-image:var(--md-admonition-icon--tip);mask-image:var(--md-admonition-icon--tip)}.md-typeset .tip>.admonition-title:after,.md-typeset .tip>summary:after{color:#00bfa5}.md-typeset .admonition.success,.md-typeset details.success{border-color:#00c853}.md-typeset .admonition.success:focus-within,.md-typeset details.success:focus-within{box-shadow:0 0 0 .2rem #00c8531a}.md-typeset .success>.admonition-title,.md-typeset .success>summary{background-color:#00c8531a}.md-typeset .success>.admonition-title:before,.md-typeset .success>summary:before{background-color:#00c853;-webkit-mask-image:var(--md-admonition-icon--success);mask-image:var(--md-admonition-icon--success)}.md-typeset .success>.admonition-title:after,.md-typeset .success>summary:after{color:#00c853}.md-typeset .admonition.question,.md-typeset details.question{border-color:#64dd17}.md-typeset .admonition.question:focus-within,.md-typeset details.question:focus-within{box-shadow:0 0 0 .2rem #64dd171a}.md-typeset .question>.admonition-title,.md-typeset .question>summary{background-color:#64dd171a}.md-typeset .question>.admonition-title:before,.md-typeset .question>summary:before{background-color:#64dd17;-webkit-mask-image:var(--md-admonition-icon--question);mask-image:var(--md-admonition-icon--question)}.md-typeset .question>.admonition-title:after,.md-typeset .question>summary:after{color:#64dd17}.md-typeset .admonition.warning,.md-typeset details.warning{border-color:#ff9100}.md-typeset .admonition.warning:focus-within,.md-typeset details.warning:focus-within{box-shadow:0 0 0 .2rem #ff91001a}.md-typeset .warning>.admonition-title,.md-typeset .warning>summary{background-color:#ff91001a}.md-typeset .warning>.admonition-title:before,.md-typeset .warning>summary:before{background-color:#ff9100;-webkit-mask-image:var(--md-admonition-icon--warning);mask-image:var(--md-admonition-icon--warning)}.md-typeset .warning>.admonition-title:after,.md-typeset .warning>summary:after{color:#ff9100}.md-typeset .admonition.failure,.md-typeset details.failure{border-color:#ff5252}.md-typeset .admonition.failure:focus-within,.md-typeset details.failure:focus-within{box-shadow:0 0 0 .2rem #ff52521a}.md-typeset .failure>.admonition-title,.md-typeset .failure>summary{background-color:#ff52521a}.md-typeset .failure>.admonition-title:before,.md-typeset .failure>summary:before{background-color:#ff5252;-webkit-mask-image:var(--md-admonition-icon--failure);mask-image:var(--md-admonition-icon--failure)}.md-typeset .failure>.admonition-title:after,.md-typeset .failure>summary:after{color:#ff5252}.md-typeset .admonition.danger,.md-typeset details.danger{border-color:#ff1744}.md-typeset .admonition.danger:focus-within,.md-typeset details.danger:focus-within{box-shadow:0 0 0 .2rem #ff17441a}.md-typeset .danger>.admonition-title,.md-typeset .danger>summary{background-color:#ff17441a}.md-typeset .danger>.admonition-title:before,.md-typeset 
.danger>summary:before{background-color:#ff1744;-webkit-mask-image:var(--md-admonition-icon--danger);mask-image:var(--md-admonition-icon--danger)}.md-typeset .danger>.admonition-title:after,.md-typeset .danger>summary:after{color:#ff1744}.md-typeset .admonition.bug,.md-typeset details.bug{border-color:#f50057}.md-typeset .admonition.bug:focus-within,.md-typeset details.bug:focus-within{box-shadow:0 0 0 .2rem #f500571a}.md-typeset .bug>.admonition-title,.md-typeset .bug>summary{background-color:#f500571a}.md-typeset .bug>.admonition-title:before,.md-typeset .bug>summary:before{background-color:#f50057;-webkit-mask-image:var(--md-admonition-icon--bug);mask-image:var(--md-admonition-icon--bug)}.md-typeset .bug>.admonition-title:after,.md-typeset .bug>summary:after{color:#f50057}.md-typeset .admonition.example,.md-typeset details.example{border-color:#7c4dff}.md-typeset .admonition.example:focus-within,.md-typeset details.example:focus-within{box-shadow:0 0 0 .2rem #7c4dff1a}.md-typeset .example>.admonition-title,.md-typeset .example>summary{background-color:#7c4dff1a}.md-typeset .example>.admonition-title:before,.md-typeset .example>summary:before{background-color:#7c4dff;-webkit-mask-image:var(--md-admonition-icon--example);mask-image:var(--md-admonition-icon--example)}.md-typeset .example>.admonition-title:after,.md-typeset .example>summary:after{color:#7c4dff}.md-typeset .admonition.quote,.md-typeset details.quote{border-color:#9e9e9e}.md-typeset .admonition.quote:focus-within,.md-typeset details.quote:focus-within{box-shadow:0 0 0 .2rem #9e9e9e1a}.md-typeset .quote>.admonition-title,.md-typeset .quote>summary{background-color:#9e9e9e1a}.md-typeset .quote>.admonition-title:before,.md-typeset .quote>summary:before{background-color:#9e9e9e;-webkit-mask-image:var(--md-admonition-icon--quote);mask-image:var(--md-admonition-icon--quote)}.md-typeset .quote>.admonition-title:after,.md-typeset .quote>summary:after{color:#9e9e9e}:root{--md-footnotes-icon:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .footnote{color:var(--md-default-fg-color--light);font-size:.64rem}[dir=ltr] .md-typeset .footnote>ol{margin-left:0}[dir=rtl] .md-typeset .footnote>ol{margin-right:0}.md-typeset .footnote>ol>li{transition:color 125ms}.md-typeset .footnote>ol>li:target{color:var(--md-default-fg-color)}.md-typeset .footnote>ol>li:focus-within .footnote-backref{opacity:1;transform:translateX(0);transition:none}.md-typeset .footnote>ol>li:hover .footnote-backref,.md-typeset .footnote>ol>li:target .footnote-backref{opacity:1;transform:translateX(0)}.md-typeset .footnote>ol>li>:first-child{margin-top:0}.md-typeset .footnote-ref{font-size:.75em;font-weight:700}html .md-typeset .footnote-ref{outline-offset:.1rem}.md-typeset [id^="fnref:"]:target>.footnote-ref{outline:auto}.md-typeset .footnote-backref{color:var(--md-typeset-a-color);display:inline-block;font-size:0;opacity:0;transform:translateX(.25rem);transition:color .25s,transform .25s .25s,opacity 125ms .25s;vertical-align:text-bottom}@media print{.md-typeset .footnote-backref{color:var(--md-typeset-a-color);opacity:1;transform:translateX(0)}}[dir=rtl] .md-typeset .footnote-backref{transform:translateX(-.25rem)}.md-typeset .footnote-backref:hover{color:var(--md-accent-fg-color)}.md-typeset 
.footnote-backref:before{background-color:currentcolor;content:"";display:inline-block;height:.8rem;-webkit-mask-image:var(--md-footnotes-icon);mask-image:var(--md-footnotes-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;width:.8rem}[dir=rtl] .md-typeset .footnote-backref:before svg{transform:scaleX(-1)}[dir=ltr] .md-typeset .headerlink{margin-left:.5rem}[dir=rtl] .md-typeset .headerlink{margin-right:.5rem}.md-typeset .headerlink{color:var(--md-default-fg-color--lighter);display:inline-block;opacity:0;transition:color .25s,opacity 125ms}@media print{.md-typeset .headerlink{display:none}}.md-typeset .headerlink:focus,.md-typeset :hover>.headerlink,.md-typeset :target>.headerlink{opacity:1;transition:color .25s,opacity 125ms}.md-typeset .headerlink:focus,.md-typeset .headerlink:hover,.md-typeset :target>.headerlink{color:var(--md-accent-fg-color)}.md-typeset :target{--md-scroll-margin:3.6rem;--md-scroll-offset:0rem;scroll-margin-top:calc(var(--md-scroll-margin) - var(--md-scroll-offset))}@media screen and (min-width:76.25em){.md-header--lifted~.md-container .md-typeset :target{--md-scroll-margin:6rem}}.md-typeset h1:target,.md-typeset h2:target,.md-typeset h3:target{--md-scroll-offset:0.2rem}.md-typeset h4:target{--md-scroll-offset:0.15rem}.md-typeset div.arithmatex{overflow:auto}@media screen and (max-width:44.984375em){.md-typeset div.arithmatex{margin:0 -.8rem}.md-typeset div.arithmatex>*{width:min-content}}.md-typeset div.arithmatex>*{margin-left:auto!important;margin-right:auto!important;padding:0 .8rem;touch-action:auto}.md-typeset div.arithmatex>* mjx-container{margin:0!important}.md-typeset div.arithmatex mjx-assistive-mml{height:0}.md-typeset del.critic{background-color:var(--md-typeset-del-color)}.md-typeset del.critic,.md-typeset ins.critic{-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset ins.critic{background-color:var(--md-typeset-ins-color)}.md-typeset .critic.comment{-webkit-box-decoration-break:clone;box-decoration-break:clone;color:var(--md-code-hl-comment-color)}.md-typeset .critic.comment:before{content:"/* "}.md-typeset .critic.comment:after{content:" */"}.md-typeset .critic.block{box-shadow:none;display:block;margin:1em 0;overflow:auto;padding-left:.8rem;padding-right:.8rem}.md-typeset .critic.block>:first-child{margin-top:.5em}.md-typeset .critic.block>:last-child{margin-bottom:.5em}:root{--md-details-icon:url('data:image/svg+xml;charset=utf-8,')}.md-typeset details{display:flow-root;overflow:visible;padding-top:0}.md-typeset details[open]>summary:after{transform:rotate(90deg)}.md-typeset details:not([open]){box-shadow:none;padding-bottom:0}.md-typeset details:not([open])>summary{border-radius:.1rem}[dir=ltr] .md-typeset summary{padding-right:1.8rem}[dir=rtl] .md-typeset summary{padding-left:1.8rem}[dir=ltr] .md-typeset summary{border-top-left-radius:.1rem}[dir=ltr] .md-typeset summary,[dir=rtl] .md-typeset summary{border-top-right-radius:.1rem}[dir=rtl] .md-typeset summary{border-top-left-radius:.1rem}.md-typeset summary{cursor:pointer;display:block;min-height:1rem;overflow:hidden}.md-typeset summary.focus-visible{outline-color:var(--md-accent-fg-color);outline-offset:.2rem}.md-typeset summary:not(.focus-visible){-webkit-tap-highlight-color:transparent;outline:none}[dir=ltr] .md-typeset summary:after{right:.4rem}[dir=rtl] .md-typeset summary:after{left:.4rem}.md-typeset 
summary:after{background-color:currentcolor;content:"";height:1rem;-webkit-mask-image:var(--md-details-icon);mask-image:var(--md-details-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;top:.625em;transform:rotate(0deg);transition:transform .25s;width:1rem}[dir=rtl] .md-typeset summary:after{transform:rotate(180deg)}.md-typeset summary::marker{display:none}.md-typeset summary::-webkit-details-marker{display:none}.md-typeset .emojione,.md-typeset .gemoji,.md-typeset .twemoji{--md-icon-size:1.125em;display:inline-flex;height:var(--md-icon-size);vertical-align:text-top}.md-typeset .emojione svg,.md-typeset .gemoji svg,.md-typeset .twemoji svg{fill:currentcolor;max-height:100%;width:var(--md-icon-size)}.md-typeset .lg,.md-typeset .xl,.md-typeset .xxl,.md-typeset .xxxl{vertical-align:text-bottom}.md-typeset .middle{vertical-align:middle}.md-typeset .lg{--md-icon-size:1.5em}.md-typeset .xl{--md-icon-size:2.25em}.md-typeset .xxl{--md-icon-size:3em}.md-typeset .xxxl{--md-icon-size:4em}.highlight .o,.highlight .ow{color:var(--md-code-hl-operator-color)}.highlight .p{color:var(--md-code-hl-punctuation-color)}.highlight .cpf,.highlight .l,.highlight .s,.highlight .s1,.highlight .s2,.highlight .sb,.highlight .sc,.highlight .si,.highlight .ss{color:var(--md-code-hl-string-color)}.highlight .cp,.highlight .se,.highlight .sh,.highlight .sr,.highlight .sx{color:var(--md-code-hl-special-color)}.highlight .il,.highlight .m,.highlight .mb,.highlight .mf,.highlight .mh,.highlight .mi,.highlight .mo{color:var(--md-code-hl-number-color)}.highlight .k,.highlight .kd,.highlight .kn,.highlight .kp,.highlight .kr,.highlight .kt{color:var(--md-code-hl-keyword-color)}.highlight .kc,.highlight .n{color:var(--md-code-hl-name-color)}.highlight .bp,.highlight .nb,.highlight .no{color:var(--md-code-hl-constant-color)}.highlight .nc,.highlight .ne,.highlight .nf,.highlight .nn{color:var(--md-code-hl-function-color)}.highlight .nd,.highlight .ni,.highlight .nl,.highlight .nt{color:var(--md-code-hl-keyword-color)}.highlight .c,.highlight .c1,.highlight .ch,.highlight .cm,.highlight .cs,.highlight .sd{color:var(--md-code-hl-comment-color)}.highlight .na,.highlight .nv,.highlight .vc,.highlight .vg,.highlight .vi{color:var(--md-code-hl-variable-color)}.highlight .ge,.highlight .gh,.highlight .go,.highlight .gp,.highlight .gr,.highlight .gs,.highlight .gt,.highlight .gu{color:var(--md-code-hl-generic-color)}.highlight .gd,.highlight .gi{border-radius:.1rem;margin:0 -.125em;padding:0 .125em}.highlight .gd{background-color:var(--md-typeset-del-color)}.highlight .gi{background-color:var(--md-typeset-ins-color)}.highlight .hll{background-color:var(--md-code-hl-color--light);box-shadow:2px 0 0 0 var(--md-code-hl-color) inset;display:block;margin:0 -1.1764705882em;padding:0 1.1764705882em}.highlight span.filename{background-color:var(--md-code-bg-color);border-bottom:.05rem solid var(--md-default-fg-color--lightest);border-top-left-radius:.1rem;border-top-right-radius:.1rem;display:flow-root;font-size:.85em;font-weight:700;margin-top:1em;padding:.6617647059em 1.1764705882em;position:relative}.highlight span.filename+pre{margin-top:0}.highlight span.filename+pre>code{border-top-left-radius:0;border-top-right-radius:0}.highlight [data-linenos]:before{background-color:var(--md-code-bg-color);box-shadow:-.05rem 0 var(--md-default-fg-color--lightest) 
inset;color:var(--md-default-fg-color--light);content:attr(data-linenos);float:left;left:-1.1764705882em;margin-left:-1.1764705882em;margin-right:1.1764705882em;padding-left:1.1764705882em;position:sticky;-webkit-user-select:none;user-select:none;z-index:3}.highlight code a[id]{position:absolute;visibility:hidden}.highlight code[data-md-copying]{display:initial}.highlight code[data-md-copying] .hll{display:contents}.highlight code[data-md-copying] .md-annotation{display:none}.highlighttable{display:flow-root}.highlighttable tbody,.highlighttable td{display:block;padding:0}.highlighttable tr{display:flex}.highlighttable pre{margin:0}.highlighttable th.filename{flex-grow:1;padding:0;text-align:left}.highlighttable th.filename span.filename{margin-top:0}.highlighttable .linenos{background-color:var(--md-code-bg-color);border-bottom-left-radius:.1rem;border-top-left-radius:.1rem;font-size:.85em;padding:.7720588235em 0 .7720588235em 1.1764705882em;-webkit-user-select:none;user-select:none}.highlighttable .linenodiv{box-shadow:-.05rem 0 var(--md-default-fg-color--lightest) inset}.highlighttable .linenodiv pre{color:var(--md-default-fg-color--light);text-align:right}.highlighttable .linenodiv span[class]{padding-right:.5882352941em}.highlighttable .code{flex:1;min-width:0}.linenodiv a{color:inherit}.md-typeset .highlighttable{direction:ltr;margin:1em 0}.md-typeset .highlighttable>tbody>tr>.code>div>pre>code{border-bottom-left-radius:0;border-top-left-radius:0}.md-typeset .highlight+.result{border:.05rem solid var(--md-code-bg-color);border-bottom-left-radius:.1rem;border-bottom-right-radius:.1rem;border-top-width:.1rem;margin-top:-1.125em;overflow:visible;padding:0 1em}.md-typeset .highlight+.result:after{clear:both;content:"";display:block}@media screen and (max-width:44.984375em){.md-content__inner>.highlight{margin:1em -.8rem}.md-content__inner>.highlight>.filename,.md-content__inner>.highlight>.highlighttable>tbody>tr>.code>div>pre>code,.md-content__inner>.highlight>.highlighttable>tbody>tr>.filename span.filename,.md-content__inner>.highlight>.highlighttable>tbody>tr>.linenos,.md-content__inner>.highlight>pre>code{border-radius:0}.md-content__inner>.highlight+.result{border-left-width:0;border-radius:0;border-right-width:0;margin-left:-.8rem;margin-right:-.8rem}}.md-typeset .keys kbd:after,.md-typeset .keys kbd:before{-moz-osx-font-smoothing:initial;-webkit-font-smoothing:initial;color:inherit;margin:0;position:relative}.md-typeset .keys span{color:var(--md-default-fg-color--light);padding:0 .2em}.md-typeset .keys .key-alt:before,.md-typeset .keys .key-left-alt:before,.md-typeset .keys .key-right-alt:before{content:"⎇";padding-right:.4em}.md-typeset .keys .key-command:before,.md-typeset .keys .key-left-command:before,.md-typeset .keys .key-right-command:before{content:"⌘";padding-right:.4em}.md-typeset .keys .key-control:before,.md-typeset .keys .key-left-control:before,.md-typeset .keys .key-right-control:before{content:"⌃";padding-right:.4em}.md-typeset .keys .key-left-meta:before,.md-typeset .keys .key-meta:before,.md-typeset .keys .key-right-meta:before{content:"◆";padding-right:.4em}.md-typeset .keys .key-left-option:before,.md-typeset .keys .key-option:before,.md-typeset .keys .key-right-option:before{content:"⌥";padding-right:.4em}.md-typeset .keys .key-left-shift:before,.md-typeset .keys .key-right-shift:before,.md-typeset .keys .key-shift:before{content:"⇧";padding-right:.4em}.md-typeset .keys .key-left-super:before,.md-typeset .keys .key-right-super:before,.md-typeset .keys 
.key-super:before{content:"❖";padding-right:.4em}.md-typeset .keys .key-left-windows:before,.md-typeset .keys .key-right-windows:before,.md-typeset .keys .key-windows:before{content:"⊞";padding-right:.4em}.md-typeset .keys .key-arrow-down:before{content:"↓";padding-right:.4em}.md-typeset .keys .key-arrow-left:before{content:"←";padding-right:.4em}.md-typeset .keys .key-arrow-right:before{content:"→";padding-right:.4em}.md-typeset .keys .key-arrow-up:before{content:"↑";padding-right:.4em}.md-typeset .keys .key-backspace:before{content:"⌫";padding-right:.4em}.md-typeset .keys .key-backtab:before{content:"⇤";padding-right:.4em}.md-typeset .keys .key-caps-lock:before{content:"⇪";padding-right:.4em}.md-typeset .keys .key-clear:before{content:"⌧";padding-right:.4em}.md-typeset .keys .key-context-menu:before{content:"☰";padding-right:.4em}.md-typeset .keys .key-delete:before{content:"⌦";padding-right:.4em}.md-typeset .keys .key-eject:before{content:"⏏";padding-right:.4em}.md-typeset .keys .key-end:before{content:"⤓";padding-right:.4em}.md-typeset .keys .key-escape:before{content:"⎋";padding-right:.4em}.md-typeset .keys .key-home:before{content:"⤒";padding-right:.4em}.md-typeset .keys .key-insert:before{content:"⎀";padding-right:.4em}.md-typeset .keys .key-page-down:before{content:"⇟";padding-right:.4em}.md-typeset .keys .key-page-up:before{content:"⇞";padding-right:.4em}.md-typeset .keys .key-print-screen:before{content:"⎙";padding-right:.4em}.md-typeset .keys .key-tab:after{content:"⇥";padding-left:.4em}.md-typeset .keys .key-num-enter:after{content:"⌤";padding-left:.4em}.md-typeset .keys .key-enter:after{content:"⏎";padding-left:.4em}:root{--md-tabbed-icon--prev:url('data:image/svg+xml;charset=utf-8,');--md-tabbed-icon--next:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .tabbed-set{border-radius:.1rem;display:flex;flex-flow:column wrap;margin:1em 0;position:relative}.md-typeset .tabbed-set>input{height:0;opacity:0;position:absolute;width:0}.md-typeset .tabbed-set>input:target{--md-scroll-offset:0.625em}.md-typeset .tabbed-set>input.focus-visible~.tabbed-labels:before{background-color:var(--md-accent-fg-color)}.md-typeset .tabbed-labels{-ms-overflow-style:none;box-shadow:0 -.05rem var(--md-default-fg-color--lightest) inset;display:flex;max-width:100%;overflow:auto;scrollbar-width:none}@media print{.md-typeset .tabbed-labels{display:contents}}@media screen{.js .md-typeset .tabbed-labels{position:relative}.js .md-typeset .tabbed-labels:before{background:var(--md-default-fg-color);bottom:0;content:"";display:block;height:2px;left:0;position:absolute;transform:translateX(var(--md-indicator-x));transition:width 225ms,background-color .25s,transform .25s;transition-timing-function:cubic-bezier(.4,0,.2,1);width:var(--md-indicator-width)}}.md-typeset .tabbed-labels::-webkit-scrollbar{display:none}.md-typeset .tabbed-labels>label{border-bottom:.1rem solid #0000;border-radius:.1rem .1rem 0 0;color:var(--md-default-fg-color--light);cursor:pointer;flex-shrink:0;font-size:.64rem;font-weight:700;padding:.78125em 1.25em .625em;scroll-margin-inline-start:1rem;transition:background-color .25s,color .25s;white-space:nowrap;width:auto}@media print{.md-typeset .tabbed-labels>label:first-child{order:1}.md-typeset .tabbed-labels>label:nth-child(2){order:2}.md-typeset .tabbed-labels>label:nth-child(3){order:3}.md-typeset .tabbed-labels>label:nth-child(4){order:4}.md-typeset .tabbed-labels>label:nth-child(5){order:5}.md-typeset .tabbed-labels>label:nth-child(6){order:6}.md-typeset 
.tabbed-labels>label:nth-child(7){order:7}.md-typeset .tabbed-labels>label:nth-child(8){order:8}.md-typeset .tabbed-labels>label:nth-child(9){order:9}.md-typeset .tabbed-labels>label:nth-child(10){order:10}.md-typeset .tabbed-labels>label:nth-child(11){order:11}.md-typeset .tabbed-labels>label:nth-child(12){order:12}.md-typeset .tabbed-labels>label:nth-child(13){order:13}.md-typeset .tabbed-labels>label:nth-child(14){order:14}.md-typeset .tabbed-labels>label:nth-child(15){order:15}.md-typeset .tabbed-labels>label:nth-child(16){order:16}.md-typeset .tabbed-labels>label:nth-child(17){order:17}.md-typeset .tabbed-labels>label:nth-child(18){order:18}.md-typeset .tabbed-labels>label:nth-child(19){order:19}.md-typeset .tabbed-labels>label:nth-child(20){order:20}}.md-typeset .tabbed-labels>label:hover{color:var(--md-default-fg-color)}.md-typeset .tabbed-labels>label>[href]:first-child{color:inherit}.md-typeset .tabbed-labels--linked>label{padding:0}.md-typeset .tabbed-labels--linked>label>a{display:block;padding:.78125em 1.25em .625em}.md-typeset .tabbed-content{width:100%}@media print{.md-typeset .tabbed-content{display:contents}}.md-typeset .tabbed-block{display:none}@media print{.md-typeset .tabbed-block{display:block}.md-typeset .tabbed-block:first-child{order:1}.md-typeset .tabbed-block:nth-child(2){order:2}.md-typeset .tabbed-block:nth-child(3){order:3}.md-typeset .tabbed-block:nth-child(4){order:4}.md-typeset .tabbed-block:nth-child(5){order:5}.md-typeset .tabbed-block:nth-child(6){order:6}.md-typeset .tabbed-block:nth-child(7){order:7}.md-typeset .tabbed-block:nth-child(8){order:8}.md-typeset .tabbed-block:nth-child(9){order:9}.md-typeset .tabbed-block:nth-child(10){order:10}.md-typeset .tabbed-block:nth-child(11){order:11}.md-typeset .tabbed-block:nth-child(12){order:12}.md-typeset .tabbed-block:nth-child(13){order:13}.md-typeset .tabbed-block:nth-child(14){order:14}.md-typeset .tabbed-block:nth-child(15){order:15}.md-typeset .tabbed-block:nth-child(16){order:16}.md-typeset .tabbed-block:nth-child(17){order:17}.md-typeset .tabbed-block:nth-child(18){order:18}.md-typeset .tabbed-block:nth-child(19){order:19}.md-typeset .tabbed-block:nth-child(20){order:20}}.md-typeset .tabbed-block>.highlight:first-child>pre,.md-typeset .tabbed-block>pre:first-child{margin:0}.md-typeset .tabbed-block>.highlight:first-child>pre>code,.md-typeset .tabbed-block>pre:first-child>code{border-top-left-radius:0;border-top-right-radius:0}.md-typeset .tabbed-block>.highlight:first-child>.filename{border-top-left-radius:0;border-top-right-radius:0;margin:0}.md-typeset .tabbed-block>.highlight:first-child>.highlighttable{margin:0}.md-typeset .tabbed-block>.highlight:first-child>.highlighttable>tbody>tr>.filename span.filename,.md-typeset .tabbed-block>.highlight:first-child>.highlighttable>tbody>tr>.linenos{border-top-left-radius:0;border-top-right-radius:0;margin:0}.md-typeset .tabbed-block>.highlight:first-child>.highlighttable>tbody>tr>.code>div>pre>code{border-top-left-radius:0;border-top-right-radius:0}.md-typeset .tabbed-block>.highlight:first-child+.result{margin-top:-.125em}.md-typeset .tabbed-block>.tabbed-set{margin:0}.md-typeset .tabbed-button{align-self:center;border-radius:100%;color:var(--md-default-fg-color--light);cursor:pointer;display:block;height:.9rem;margin-top:.1rem;pointer-events:auto;transition:background-color .25s;width:.9rem}.md-typeset .tabbed-button:hover{background-color:var(--md-accent-fg-color--transparent);color:var(--md-accent-fg-color)}.md-typeset 
.tabbed-button:after{background-color:currentcolor;content:"";display:block;height:100%;-webkit-mask-image:var(--md-tabbed-icon--prev);mask-image:var(--md-tabbed-icon--prev);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;transition:background-color .25s,transform .25s;width:100%}.md-typeset .tabbed-control{background:linear-gradient(to right,var(--md-default-bg-color) 60%,#0000);display:flex;height:1.9rem;justify-content:start;pointer-events:none;position:absolute;transition:opacity 125ms;width:1.2rem}[dir=rtl] .md-typeset .tabbed-control{transform:rotate(180deg)}.md-typeset .tabbed-control[hidden]{opacity:0}.md-typeset .tabbed-control--next{background:linear-gradient(to left,var(--md-default-bg-color) 60%,#0000);justify-content:end;right:0}.md-typeset .tabbed-control--next .tabbed-button:after{-webkit-mask-image:var(--md-tabbed-icon--next);mask-image:var(--md-tabbed-icon--next)}@media screen and (max-width:44.984375em){[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels{padding-left:.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels{padding-right:.8rem}.md-content__inner>.tabbed-set .tabbed-labels{margin:0 -.8rem;max-width:100vw;scroll-padding-inline-start:.8rem}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels:after{padding-right:.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels:after{padding-left:.8rem}.md-content__inner>.tabbed-set .tabbed-labels:after{content:""}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{padding-left:.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{padding-right:.8rem}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{margin-left:-.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{margin-right:-.8rem}.md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--prev{width:2rem}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{padding-right:.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{padding-left:.8rem}[dir=ltr] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{margin-right:-.8rem}[dir=rtl] .md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{margin-left:-.8rem}.md-content__inner>.tabbed-set .tabbed-labels~.tabbed-control--next{width:2rem}}@media screen{.md-typeset .tabbed-set>input:first-child:checked~.tabbed-labels>:first-child,.md-typeset .tabbed-set>input:nth-child(10):checked~.tabbed-labels>:nth-child(10),.md-typeset .tabbed-set>input:nth-child(11):checked~.tabbed-labels>:nth-child(11),.md-typeset .tabbed-set>input:nth-child(12):checked~.tabbed-labels>:nth-child(12),.md-typeset .tabbed-set>input:nth-child(13):checked~.tabbed-labels>:nth-child(13),.md-typeset .tabbed-set>input:nth-child(14):checked~.tabbed-labels>:nth-child(14),.md-typeset .tabbed-set>input:nth-child(15):checked~.tabbed-labels>:nth-child(15),.md-typeset .tabbed-set>input:nth-child(16):checked~.tabbed-labels>:nth-child(16),.md-typeset .tabbed-set>input:nth-child(17):checked~.tabbed-labels>:nth-child(17),.md-typeset .tabbed-set>input:nth-child(18):checked~.tabbed-labels>:nth-child(18),.md-typeset .tabbed-set>input:nth-child(19):checked~.tabbed-labels>:nth-child(19),.md-typeset .tabbed-set>input:nth-child(2):checked~.tabbed-labels>:nth-child(2),.md-typeset .tabbed-set>input:nth-child(20):checked~.tabbed-labels>:nth-child(20),.md-typeset 
.tabbed-set>input:nth-child(3):checked~.tabbed-labels>:nth-child(3),.md-typeset .tabbed-set>input:nth-child(4):checked~.tabbed-labels>:nth-child(4),.md-typeset .tabbed-set>input:nth-child(5):checked~.tabbed-labels>:nth-child(5),.md-typeset .tabbed-set>input:nth-child(6):checked~.tabbed-labels>:nth-child(6),.md-typeset .tabbed-set>input:nth-child(7):checked~.tabbed-labels>:nth-child(7),.md-typeset .tabbed-set>input:nth-child(8):checked~.tabbed-labels>:nth-child(8),.md-typeset .tabbed-set>input:nth-child(9):checked~.tabbed-labels>:nth-child(9){color:var(--md-default-fg-color)}.md-typeset .no-js .tabbed-set>input:first-child:checked~.tabbed-labels>:first-child,.md-typeset .no-js .tabbed-set>input:nth-child(10):checked~.tabbed-labels>:nth-child(10),.md-typeset .no-js .tabbed-set>input:nth-child(11):checked~.tabbed-labels>:nth-child(11),.md-typeset .no-js .tabbed-set>input:nth-child(12):checked~.tabbed-labels>:nth-child(12),.md-typeset .no-js .tabbed-set>input:nth-child(13):checked~.tabbed-labels>:nth-child(13),.md-typeset .no-js .tabbed-set>input:nth-child(14):checked~.tabbed-labels>:nth-child(14),.md-typeset .no-js .tabbed-set>input:nth-child(15):checked~.tabbed-labels>:nth-child(15),.md-typeset .no-js .tabbed-set>input:nth-child(16):checked~.tabbed-labels>:nth-child(16),.md-typeset .no-js .tabbed-set>input:nth-child(17):checked~.tabbed-labels>:nth-child(17),.md-typeset .no-js .tabbed-set>input:nth-child(18):checked~.tabbed-labels>:nth-child(18),.md-typeset .no-js .tabbed-set>input:nth-child(19):checked~.tabbed-labels>:nth-child(19),.md-typeset .no-js .tabbed-set>input:nth-child(2):checked~.tabbed-labels>:nth-child(2),.md-typeset .no-js .tabbed-set>input:nth-child(20):checked~.tabbed-labels>:nth-child(20),.md-typeset .no-js .tabbed-set>input:nth-child(3):checked~.tabbed-labels>:nth-child(3),.md-typeset .no-js .tabbed-set>input:nth-child(4):checked~.tabbed-labels>:nth-child(4),.md-typeset .no-js .tabbed-set>input:nth-child(5):checked~.tabbed-labels>:nth-child(5),.md-typeset .no-js .tabbed-set>input:nth-child(6):checked~.tabbed-labels>:nth-child(6),.md-typeset .no-js .tabbed-set>input:nth-child(7):checked~.tabbed-labels>:nth-child(7),.md-typeset .no-js .tabbed-set>input:nth-child(8):checked~.tabbed-labels>:nth-child(8),.md-typeset .no-js .tabbed-set>input:nth-child(9):checked~.tabbed-labels>:nth-child(9),.no-js .md-typeset .tabbed-set>input:first-child:checked~.tabbed-labels>:first-child,.no-js .md-typeset .tabbed-set>input:nth-child(10):checked~.tabbed-labels>:nth-child(10),.no-js .md-typeset .tabbed-set>input:nth-child(11):checked~.tabbed-labels>:nth-child(11),.no-js .md-typeset .tabbed-set>input:nth-child(12):checked~.tabbed-labels>:nth-child(12),.no-js .md-typeset .tabbed-set>input:nth-child(13):checked~.tabbed-labels>:nth-child(13),.no-js .md-typeset .tabbed-set>input:nth-child(14):checked~.tabbed-labels>:nth-child(14),.no-js .md-typeset .tabbed-set>input:nth-child(15):checked~.tabbed-labels>:nth-child(15),.no-js .md-typeset .tabbed-set>input:nth-child(16):checked~.tabbed-labels>:nth-child(16),.no-js .md-typeset .tabbed-set>input:nth-child(17):checked~.tabbed-labels>:nth-child(17),.no-js .md-typeset .tabbed-set>input:nth-child(18):checked~.tabbed-labels>:nth-child(18),.no-js .md-typeset .tabbed-set>input:nth-child(19):checked~.tabbed-labels>:nth-child(19),.no-js .md-typeset .tabbed-set>input:nth-child(2):checked~.tabbed-labels>:nth-child(2),.no-js .md-typeset .tabbed-set>input:nth-child(20):checked~.tabbed-labels>:nth-child(20),.no-js .md-typeset 
.tabbed-set>input:nth-child(3):checked~.tabbed-labels>:nth-child(3),.no-js .md-typeset .tabbed-set>input:nth-child(4):checked~.tabbed-labels>:nth-child(4),.no-js .md-typeset .tabbed-set>input:nth-child(5):checked~.tabbed-labels>:nth-child(5),.no-js .md-typeset .tabbed-set>input:nth-child(6):checked~.tabbed-labels>:nth-child(6),.no-js .md-typeset .tabbed-set>input:nth-child(7):checked~.tabbed-labels>:nth-child(7),.no-js .md-typeset .tabbed-set>input:nth-child(8):checked~.tabbed-labels>:nth-child(8),.no-js .md-typeset .tabbed-set>input:nth-child(9):checked~.tabbed-labels>:nth-child(9){border-color:var(--md-default-fg-color)}}.md-typeset .tabbed-set>input:first-child.focus-visible~.tabbed-labels>:first-child,.md-typeset .tabbed-set>input:nth-child(10).focus-visible~.tabbed-labels>:nth-child(10),.md-typeset .tabbed-set>input:nth-child(11).focus-visible~.tabbed-labels>:nth-child(11),.md-typeset .tabbed-set>input:nth-child(12).focus-visible~.tabbed-labels>:nth-child(12),.md-typeset .tabbed-set>input:nth-child(13).focus-visible~.tabbed-labels>:nth-child(13),.md-typeset .tabbed-set>input:nth-child(14).focus-visible~.tabbed-labels>:nth-child(14),.md-typeset .tabbed-set>input:nth-child(15).focus-visible~.tabbed-labels>:nth-child(15),.md-typeset .tabbed-set>input:nth-child(16).focus-visible~.tabbed-labels>:nth-child(16),.md-typeset .tabbed-set>input:nth-child(17).focus-visible~.tabbed-labels>:nth-child(17),.md-typeset .tabbed-set>input:nth-child(18).focus-visible~.tabbed-labels>:nth-child(18),.md-typeset .tabbed-set>input:nth-child(19).focus-visible~.tabbed-labels>:nth-child(19),.md-typeset .tabbed-set>input:nth-child(2).focus-visible~.tabbed-labels>:nth-child(2),.md-typeset .tabbed-set>input:nth-child(20).focus-visible~.tabbed-labels>:nth-child(20),.md-typeset .tabbed-set>input:nth-child(3).focus-visible~.tabbed-labels>:nth-child(3),.md-typeset .tabbed-set>input:nth-child(4).focus-visible~.tabbed-labels>:nth-child(4),.md-typeset .tabbed-set>input:nth-child(5).focus-visible~.tabbed-labels>:nth-child(5),.md-typeset .tabbed-set>input:nth-child(6).focus-visible~.tabbed-labels>:nth-child(6),.md-typeset .tabbed-set>input:nth-child(7).focus-visible~.tabbed-labels>:nth-child(7),.md-typeset .tabbed-set>input:nth-child(8).focus-visible~.tabbed-labels>:nth-child(8),.md-typeset .tabbed-set>input:nth-child(9).focus-visible~.tabbed-labels>:nth-child(9){color:var(--md-accent-fg-color)}.md-typeset .tabbed-set>input:first-child:checked~.tabbed-content>:first-child,.md-typeset .tabbed-set>input:nth-child(10):checked~.tabbed-content>:nth-child(10),.md-typeset .tabbed-set>input:nth-child(11):checked~.tabbed-content>:nth-child(11),.md-typeset .tabbed-set>input:nth-child(12):checked~.tabbed-content>:nth-child(12),.md-typeset .tabbed-set>input:nth-child(13):checked~.tabbed-content>:nth-child(13),.md-typeset .tabbed-set>input:nth-child(14):checked~.tabbed-content>:nth-child(14),.md-typeset .tabbed-set>input:nth-child(15):checked~.tabbed-content>:nth-child(15),.md-typeset .tabbed-set>input:nth-child(16):checked~.tabbed-content>:nth-child(16),.md-typeset .tabbed-set>input:nth-child(17):checked~.tabbed-content>:nth-child(17),.md-typeset .tabbed-set>input:nth-child(18):checked~.tabbed-content>:nth-child(18),.md-typeset .tabbed-set>input:nth-child(19):checked~.tabbed-content>:nth-child(19),.md-typeset .tabbed-set>input:nth-child(2):checked~.tabbed-content>:nth-child(2),.md-typeset .tabbed-set>input:nth-child(20):checked~.tabbed-content>:nth-child(20),.md-typeset 
.tabbed-set>input:nth-child(3):checked~.tabbed-content>:nth-child(3),.md-typeset .tabbed-set>input:nth-child(4):checked~.tabbed-content>:nth-child(4),.md-typeset .tabbed-set>input:nth-child(5):checked~.tabbed-content>:nth-child(5),.md-typeset .tabbed-set>input:nth-child(6):checked~.tabbed-content>:nth-child(6),.md-typeset .tabbed-set>input:nth-child(7):checked~.tabbed-content>:nth-child(7),.md-typeset .tabbed-set>input:nth-child(8):checked~.tabbed-content>:nth-child(8),.md-typeset .tabbed-set>input:nth-child(9):checked~.tabbed-content>:nth-child(9){display:block}:root{--md-tasklist-icon:url('data:image/svg+xml;charset=utf-8,');--md-tasklist-icon--checked:url('data:image/svg+xml;charset=utf-8,')}.md-typeset .task-list-item{list-style-type:none;position:relative}[dir=ltr] .md-typeset .task-list-item [type=checkbox]{left:-2em}[dir=rtl] .md-typeset .task-list-item [type=checkbox]{right:-2em}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}[dir=ltr] .md-typeset .task-list-indicator:before{left:-1.5em}[dir=rtl] .md-typeset .task-list-indicator:before{right:-1.5em}.md-typeset .task-list-indicator:before{background-color:var(--md-default-fg-color--lightest);content:"";height:1.25em;-webkit-mask-image:var(--md-tasklist-icon);mask-image:var(--md-tasklist-icon);-webkit-mask-position:center;mask-position:center;-webkit-mask-repeat:no-repeat;mask-repeat:no-repeat;-webkit-mask-size:contain;mask-size:contain;position:absolute;top:.15em;width:1.25em}.md-typeset [type=checkbox]:checked+.task-list-indicator:before{background-color:#00e676;-webkit-mask-image:var(--md-tasklist-icon--checked);mask-image:var(--md-tasklist-icon--checked)}:root>*{--md-mermaid-font-family:var(--md-text-font-family),sans-serif;--md-mermaid-edge-color:var(--md-code-fg-color);--md-mermaid-node-bg-color:var(--md-accent-fg-color--transparent);--md-mermaid-node-fg-color:var(--md-accent-fg-color);--md-mermaid-label-bg-color:var(--md-default-bg-color);--md-mermaid-label-fg-color:var(--md-code-fg-color);--md-mermaid-sequence-actor-bg-color:var(--md-mermaid-label-bg-color);--md-mermaid-sequence-actor-fg-color:var(--md-mermaid-label-fg-color);--md-mermaid-sequence-actor-border-color:var(--md-mermaid-node-fg-color);--md-mermaid-sequence-actor-line-color:var(--md-default-fg-color--lighter);--md-mermaid-sequence-actorman-bg-color:var(--md-mermaid-label-bg-color);--md-mermaid-sequence-actorman-line-color:var(--md-mermaid-node-fg-color);--md-mermaid-sequence-box-bg-color:var(--md-mermaid-node-bg-color);--md-mermaid-sequence-box-fg-color:var(--md-mermaid-edge-color);--md-mermaid-sequence-label-bg-color:var(--md-mermaid-node-bg-color);--md-mermaid-sequence-label-fg-color:var(--md-mermaid-node-fg-color);--md-mermaid-sequence-loop-bg-color:var(--md-mermaid-node-bg-color);--md-mermaid-sequence-loop-fg-color:var(--md-mermaid-edge-color);--md-mermaid-sequence-loop-border-color:var(--md-mermaid-node-fg-color);--md-mermaid-sequence-message-fg-color:var(--md-mermaid-edge-color);--md-mermaid-sequence-message-line-color:var(--md-mermaid-edge-color);--md-mermaid-sequence-note-bg-color:var(--md-mermaid-label-bg-color);--md-mermaid-sequence-note-fg-color:var(--md-mermaid-edge-color);--md-mermaid-sequence-note-border-color:var(--md-mermaid-label-fg-color);--md-mermaid-sequence-number-bg-color:var(--md-mermaid-node-fg-color);--md-mermaid-sequence-number-fg-color:var(--md-accent-bg-color)}.mermaid{line-height:normal;margin:1em 0}.md-typeset 
.grid{grid-gap:.4rem;display:grid;grid-template-columns:repeat(auto-fit,minmax(min(100%,16rem),1fr));margin:1em 0}.md-typeset .grid.cards>ol,.md-typeset .grid.cards>ul{display:contents}.md-typeset .grid.cards>ol>li,.md-typeset .grid.cards>ul>li,.md-typeset .grid>.card{border:.05rem solid var(--md-default-fg-color--lightest);border-radius:.1rem;display:block;margin:0;padding:.8rem;transition:border .25s,box-shadow .25s}.md-typeset .grid.cards>ol>li:focus-within,.md-typeset .grid.cards>ol>li:hover,.md-typeset .grid.cards>ul>li:focus-within,.md-typeset .grid.cards>ul>li:hover,.md-typeset .grid>.card:focus-within,.md-typeset .grid>.card:hover{border-color:#0000;box-shadow:var(--md-shadow-z2)}.md-typeset .grid.cards>ol>li>hr,.md-typeset .grid.cards>ul>li>hr,.md-typeset .grid>.card>hr{margin-bottom:1em;margin-top:1em}.md-typeset .grid.cards>ol>li>:first-child,.md-typeset .grid.cards>ul>li>:first-child,.md-typeset .grid>.card>:first-child{margin-top:0}.md-typeset .grid.cards>ol>li>:last-child,.md-typeset .grid.cards>ul>li>:last-child,.md-typeset .grid>.card>:last-child{margin-bottom:0}.md-typeset .grid>*,.md-typeset .grid>.admonition,.md-typeset .grid>.highlight>*,.md-typeset .grid>.highlighttable,.md-typeset .grid>.md-typeset details,.md-typeset .grid>details,.md-typeset .grid>pre{margin-bottom:0;margin-top:0}.md-typeset .grid>.highlight>pre:only-child,.md-typeset .grid>.highlight>pre>code,.md-typeset .grid>.highlighttable,.md-typeset .grid>.highlighttable>tbody,.md-typeset .grid>.highlighttable>tbody>tr,.md-typeset .grid>.highlighttable>tbody>tr>.code,.md-typeset .grid>.highlighttable>tbody>tr>.code>.highlight,.md-typeset .grid>.highlighttable>tbody>tr>.code>.highlight>pre,.md-typeset .grid>.highlighttable>tbody>tr>.code>.highlight>pre>code{height:100%}.md-typeset .grid>.tabbed-set{margin-bottom:0;margin-top:0}@media screen and (min-width:45em){[dir=ltr] .md-typeset .inline{float:left}[dir=rtl] .md-typeset .inline{float:right}[dir=ltr] .md-typeset .inline{margin-right:.8rem}[dir=rtl] .md-typeset .inline{margin-left:.8rem}.md-typeset .inline{margin-bottom:.8rem;margin-top:0;width:11.7rem}[dir=ltr] .md-typeset .inline.end{float:right}[dir=rtl] .md-typeset .inline.end{float:left}[dir=ltr] .md-typeset .inline.end{margin-left:.8rem;margin-right:0}[dir=rtl] .md-typeset .inline.end{margin-left:0;margin-right:.8rem}} \ No newline at end of file diff --git a/assets/stylesheets/palette.ab4e12ef.min.css b/assets/stylesheets/palette.ab4e12ef.min.css new file mode 100644 index 000000000..75aaf8425 --- /dev/null +++ b/assets/stylesheets/palette.ab4e12ef.min.css @@ -0,0 +1 @@ +@media 
screen{[data-md-color-scheme=slate]{--md-default-fg-color:hsla(var(--md-hue),15%,90%,0.82);--md-default-fg-color--light:hsla(var(--md-hue),15%,90%,0.56);--md-default-fg-color--lighter:hsla(var(--md-hue),15%,90%,0.32);--md-default-fg-color--lightest:hsla(var(--md-hue),15%,90%,0.12);--md-default-bg-color:hsla(var(--md-hue),15%,14%,1);--md-default-bg-color--light:hsla(var(--md-hue),15%,14%,0.54);--md-default-bg-color--lighter:hsla(var(--md-hue),15%,14%,0.26);--md-default-bg-color--lightest:hsla(var(--md-hue),15%,14%,0.07);--md-code-fg-color:hsla(var(--md-hue),18%,86%,0.82);--md-code-bg-color:hsla(var(--md-hue),15%,18%,1);--md-code-bg-color--light:hsla(var(--md-hue),15%,18%,0.9);--md-code-bg-color--lighter:hsla(var(--md-hue),15%,18%,0.54);--md-code-hl-color:#2977ff;--md-code-hl-color--light:#2977ff1a;--md-code-hl-number-color:#e6695b;--md-code-hl-special-color:#f06090;--md-code-hl-function-color:#c973d9;--md-code-hl-constant-color:#9383e2;--md-code-hl-keyword-color:#6791e0;--md-code-hl-string-color:#2fb170;--md-code-hl-name-color:var(--md-code-fg-color);--md-code-hl-operator-color:var(--md-default-fg-color--light);--md-code-hl-punctuation-color:var(--md-default-fg-color--light);--md-code-hl-comment-color:var(--md-default-fg-color--light);--md-code-hl-generic-color:var(--md-default-fg-color--light);--md-code-hl-variable-color:var(--md-default-fg-color--light);--md-typeset-color:var(--md-default-fg-color);--md-typeset-a-color:var(--md-primary-fg-color);--md-typeset-kbd-color:hsla(var(--md-hue),15%,90%,0.12);--md-typeset-kbd-accent-color:hsla(var(--md-hue),15%,90%,0.2);--md-typeset-kbd-border-color:hsla(var(--md-hue),15%,14%,1);--md-typeset-mark-color:#4287ff4d;--md-typeset-table-color:hsla(var(--md-hue),15%,95%,0.12);--md-typeset-table-color--light:hsla(var(--md-hue),15%,95%,0.035);--md-admonition-fg-color:var(--md-default-fg-color);--md-admonition-bg-color:var(--md-default-bg-color);--md-footer-bg-color:hsla(var(--md-hue),15%,10%,0.87);--md-footer-bg-color--dark:hsla(var(--md-hue),15%,8%,1);--md-shadow-z1:0 0.2rem 0.5rem #0000000d,0 0 0.05rem #0000001a;--md-shadow-z2:0 0.2rem 0.5rem #00000040,0 0 0.05rem #00000040;--md-shadow-z3:0 0.2rem 0.5rem #0006,0 0 0.05rem #00000059;color-scheme:dark}[data-md-color-scheme=slate] img[src$="#gh-light-mode-only"],[data-md-color-scheme=slate] img[src$="#only-light"]{display:none}[data-md-color-scheme=slate][data-md-color-primary=pink]{--md-typeset-a-color:#ed5487}[data-md-color-scheme=slate][data-md-color-primary=purple]{--md-typeset-a-color:#c46fd3}[data-md-color-scheme=slate][data-md-color-primary=deep-purple]{--md-typeset-a-color:#a47bea}[data-md-color-scheme=slate][data-md-color-primary=indigo]{--md-typeset-a-color:#5488e8}[data-md-color-scheme=slate][data-md-color-primary=teal]{--md-typeset-a-color:#00ccb8}[data-md-color-scheme=slate][data-md-color-primary=green]{--md-typeset-a-color:#71c174}[data-md-color-scheme=slate][data-md-color-primary=deep-orange]{--md-typeset-a-color:#ff764d}[data-md-color-scheme=slate][data-md-color-primary=brown]{--md-typeset-a-color:#c1775c}[data-md-color-scheme=slate][data-md-color-primary=black],[data-md-color-scheme=slate][data-md-color-primary=blue-grey],[data-md-color-scheme=slate][data-md-color-primary=grey],[data-md-color-scheme=slate][data-md-color-primary=white]{--md-typeset-a-color:#5e8bde}[data-md-color-switching] *,[data-md-color-switching] :after,[data-md-color-switching] 
:before{transition-duration:0ms!important}}[data-md-color-accent=red]{--md-accent-fg-color:#ff1947;--md-accent-fg-color--transparent:#ff19471a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=pink]{--md-accent-fg-color:#f50056;--md-accent-fg-color--transparent:#f500561a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=purple]{--md-accent-fg-color:#df41fb;--md-accent-fg-color--transparent:#df41fb1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=deep-purple]{--md-accent-fg-color:#7c4dff;--md-accent-fg-color--transparent:#7c4dff1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=indigo]{--md-accent-fg-color:#526cfe;--md-accent-fg-color--transparent:#526cfe1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=blue]{--md-accent-fg-color:#4287ff;--md-accent-fg-color--transparent:#4287ff1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=light-blue]{--md-accent-fg-color:#0091eb;--md-accent-fg-color--transparent:#0091eb1a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=cyan]{--md-accent-fg-color:#00bad6;--md-accent-fg-color--transparent:#00bad61a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=teal]{--md-accent-fg-color:#00bda4;--md-accent-fg-color--transparent:#00bda41a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=green]{--md-accent-fg-color:#00c753;--md-accent-fg-color--transparent:#00c7531a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=light-green]{--md-accent-fg-color:#63de17;--md-accent-fg-color--transparent:#63de171a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-accent=lime]{--md-accent-fg-color:#b0eb00;--md-accent-fg-color--transparent:#b0eb001a;--md-accent-bg-color:#000000de;--md-accent-bg-color--light:#0000008a}[data-md-color-accent=yellow]{--md-accent-fg-color:#ffd500;--md-accent-fg-color--transparent:#ffd5001a;--md-accent-bg-color:#000000de;--md-accent-bg-color--light:#0000008a}[data-md-color-accent=amber]{--md-accent-fg-color:#fa0;--md-accent-fg-color--transparent:#ffaa001a;--md-accent-bg-color:#000000de;--md-accent-bg-color--light:#0000008a}[data-md-color-accent=orange]{--md-accent-fg-color:#ff9100;--md-accent-fg-color--transparent:#ff91001a;--md-accent-bg-color:#000000de;--md-accent-bg-color--light:#0000008a}[data-md-color-accent=deep-orange]{--md-accent-fg-color:#ff6e42;--md-accent-fg-color--transparent:#ff6e421a;--md-accent-bg-color:#fff;--md-accent-bg-color--light:#ffffffb3}[data-md-color-primary=red]{--md-primary-fg-color:#ef5552;--md-primary-fg-color--light:#e57171;--md-primary-fg-color--dark:#e53734;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=pink]{--md-primary-fg-color:#e92063;--md-primary-fg-color--light:#ec417a;--md-primary-fg-color--dark:#c3185d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=purple]{--md-primary-fg-color:#ab47bd;--md-primary-fg-color--light:#bb69c9;--md-primary-fg-color--dark:#8c24a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=deep-purple]{--md-primary-fg-color:#7e56c2;--md-primary-fg-color--light:#9574cd;--md-primary-fg-color--dark:#673ab6;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-prim
ary=indigo]{--md-primary-fg-color:#4051b5;--md-primary-fg-color--light:#5d6cc0;--md-primary-fg-color--dark:#303fa1;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=blue]{--md-primary-fg-color:#2094f3;--md-primary-fg-color--light:#42a5f5;--md-primary-fg-color--dark:#1975d2;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=light-blue]{--md-primary-fg-color:#02a6f2;--md-primary-fg-color--light:#28b5f6;--md-primary-fg-color--dark:#0287cf;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=cyan]{--md-primary-fg-color:#00bdd6;--md-primary-fg-color--light:#25c5da;--md-primary-fg-color--dark:#0097a8;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=teal]{--md-primary-fg-color:#009485;--md-primary-fg-color--light:#26a699;--md-primary-fg-color--dark:#007a6c;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=green]{--md-primary-fg-color:#4cae4f;--md-primary-fg-color--light:#68bb6c;--md-primary-fg-color--dark:#398e3d;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=light-green]{--md-primary-fg-color:#8bc34b;--md-primary-fg-color--light:#9ccc66;--md-primary-fg-color--dark:#689f38;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=lime]{--md-primary-fg-color:#cbdc38;--md-primary-fg-color--light:#d3e156;--md-primary-fg-color--dark:#b0b52c;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a}[data-md-color-primary=yellow]{--md-primary-fg-color:#ffec3d;--md-primary-fg-color--light:#ffee57;--md-primary-fg-color--dark:#fbc02d;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a}[data-md-color-primary=amber]{--md-primary-fg-color:#ffc105;--md-primary-fg-color--light:#ffc929;--md-primary-fg-color--dark:#ffa200;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a}[data-md-color-primary=orange]{--md-primary-fg-color:#ffa724;--md-primary-fg-color--light:#ffa724;--md-primary-fg-color--dark:#fa8900;--md-primary-bg-color:#000000de;--md-primary-bg-color--light:#0000008a}[data-md-color-primary=deep-orange]{--md-primary-fg-color:#ff6e42;--md-primary-fg-color--light:#ff8a66;--md-primary-fg-color--dark:#f4511f;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=brown]{--md-primary-fg-color:#795649;--md-primary-fg-color--light:#8d6e62;--md-primary-fg-color--dark:#5d4037;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3}[data-md-color-primary=grey]{--md-primary-fg-color:#757575;--md-primary-fg-color--light:#9e9e9e;--md-primary-fg-color--dark:#616161;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3;--md-typeset-a-color:#4051b5}[data-md-color-primary=blue-grey]{--md-primary-fg-color:#546d78;--md-primary-fg-color--light:#607c8a;--md-primary-fg-color--dark:#455a63;--md-primary-bg-color:#fff;--md-primary-bg-color--light:#ffffffb3;--md-typeset-a-color:#4051b5}[data-md-color-primary=light-green]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#72ad2e}[data-md-color-primary=lime]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#8b990a}[data-md-color-primary=yellow]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#b8a500}[data-md-color-primary=amber]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#d19d00}[data-md-color-primary=orange]:not([data-md-color-scheme=slate]){--md-typeset-a-color:#e68a00}[data-md-color-prima
ry=white]{--md-primary-fg-color:hsla(var(--md-hue),0%,100%,1);--md-primary-fg-color--light:hsla(var(--md-hue),0%,100%,0.7);--md-primary-fg-color--dark:hsla(var(--md-hue),0%,0%,0.07);--md-primary-bg-color:hsla(var(--md-hue),0%,0%,0.87);--md-primary-bg-color--light:hsla(var(--md-hue),0%,0%,0.54);--md-typeset-a-color:#4051b5}[data-md-color-primary=white] .md-button{color:var(--md-typeset-a-color)}[data-md-color-primary=white] .md-button--primary{background-color:var(--md-typeset-a-color);border-color:var(--md-typeset-a-color);color:hsla(var(--md-hue),0%,100%,1)}@media screen and (min-width:60em){[data-md-color-primary=white] .md-search__form{background-color:hsla(var(--md-hue),0%,0%,.07)}[data-md-color-primary=white] .md-search__form:hover{background-color:hsla(var(--md-hue),0%,0%,.32)}[data-md-color-primary=white] .md-search__input+.md-search__icon{color:hsla(var(--md-hue),0%,0%,.87)}}@media screen and (min-width:76.25em){[data-md-color-primary=white] .md-tabs{border-bottom:.05rem solid #00000012}}[data-md-color-primary=black]{--md-primary-fg-color:hsla(var(--md-hue),15%,9%,1);--md-primary-fg-color--light:hsla(var(--md-hue),15%,9%,0.54);--md-primary-fg-color--dark:hsla(var(--md-hue),15%,9%,1);--md-primary-bg-color:hsla(var(--md-hue),15%,100%,1);--md-primary-bg-color--light:hsla(var(--md-hue),15%,100%,0.7);--md-typeset-a-color:#4051b5}[data-md-color-primary=black] .md-button{color:var(--md-typeset-a-color)}[data-md-color-primary=black] .md-button--primary{background-color:var(--md-typeset-a-color);border-color:var(--md-typeset-a-color);color:hsla(var(--md-hue),0%,100%,1)}[data-md-color-primary=black] .md-header{background-color:hsla(var(--md-hue),15%,9%,1)}@media screen and (max-width:59.984375em){[data-md-color-primary=black] .md-nav__source{background-color:hsla(var(--md-hue),15%,11%,.87)}}@media screen and (max-width:76.234375em){html [data-md-color-primary=black] .md-nav--primary .md-nav__title[for=__drawer]{background-color:hsla(var(--md-hue),15%,9%,1)}}@media screen and (min-width:76.25em){[data-md-color-primary=black] .md-tabs{background-color:hsla(var(--md-hue),15%,9%,1)}} \ No newline at end of file diff --git a/assets/stylesheets/sh.css b/assets/stylesheets/sh.css new file mode 100644 index 000000000..0816e1c7e --- /dev/null +++ b/assets/stylesheets/sh.css @@ -0,0 +1,115 @@ +/* get header font */ +@import url("//fonts.googleapis.com/css?family=Bangers&display=fallback"); + +/* theme color schemes */ +:root { + --md-primary-fg-color: #8c1515; + --md-primary-fg-color--light: #b1040e; + --md-primary-fg-color--dark: #820000; + --md-accent-fg-color: #b1040e; + --md-accent-fg-color--transparent: #b1040e0a; +} + +/* accessibility requirement */ +.md-typeset a:link { + text-decoration: underline; + text-decoration-color: #ffffff00; +} + +/* announce bar */ +.md-banner a, +.md-banner a:focus, +.md-banner a:hover { color: currentColor; } +.md-banner .icon { + margin-left: 0.2em; + color: #b1040e; +} + +/* title style */ +.md-header__title { + font-family: "Bangers", Roboto, Helvetica, Arial, sans-serif; + font-size: 1.2rem; +} +.md-header__title .md-ellipsis:after { + /* prevent text clipping */ + content: ''; + padding: 1px; +} + +/* cookie consent link */ +.md-footer-meta.md-typeset .consent { + color: var(--md-footer-fg-color--lighter); +} + +/* don't cut words in code */ +.md-typeset code { word-break: keep-all; } + +/* Sherlock admonition */ +.md-typeset .admonition.sherlock, +.md-typeset details.sherlock { + border-color: #8c1515; +} +.md-typeset .sherlock > .admonition-title, 
+.md-typeset .sherlock > summary { + background-color: #82000010; + border-color: #8c1515; +} +.md-typeset .sherlock > .admonition-title::before, +.md-typeset .sherlock > summary::before { + background-color: #8c1515; + -webkit-mask-image: var(--md-admonition-icon--sherlock); + mask-image: var(--md-admonition-icon--sherlock); +} + +/* additional styles */ +.chk_yes { color: darkgreen; } +.chk_no { color: darkred; } +.fl_left { float: left; } +.fl_right { float: right; } + +.sw_mpi { color: darkblue; font-weight: bold; } +.sw_mpi:after { content: "mpi" } +.sw_gpu { color: darkgreen; font-weight: bold; } +.sw_gpu:after { content: "gpu" } +.sw_lic { color: darkred; font-weight: bold; } +.sw_lic:after { content: "lic" } +.sw_def { color: gray; font-weight: bold; } +.sw_def:after { content: "def" } + +.number { + font-size: 1.2rem; + color: var(--md-primary-fg-color); +} +.number_desc { + font-size: 0.8em; + margin-top: -1em !important; + color: var(--md-default-fg-color--light); +} + + +/* pulsate */ +@keyframes pulsate { + 0%, 40%, 80%, 100% { + transform: scale(1); + } + 20%, 60% { + transform: scale(1.15); + } +} +.pulsate { + color: #dd2e44; + animation: pulsate 1000ms infinite; +} + +/* rotate */ +@keyframes rotation { + from { + transform: rotate(0deg); + } + to { + transform: rotate(359deg); + } +} +.rotate { + animation: rotation 2s infinite linear; +} diff --git a/catalog/index.html b/catalog/index.html new file mode 100644 index 000000000..649686dd2 --- /dev/null +++ b/catalog/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/advanced-topics/connection/index.html b/docs/advanced-topics/connection/index.html new file mode 100644 index 000000000..3496491bc --- /dev/null +++ b/docs/advanced-topics/connection/index.html @@ -0,0 +1,21 @@ + Connection options - Sherlock

Advanced connection options#

Login nodes#

Sherlock login nodes are grouped behind a single DNS alias: login.sherlock.stanford.edu.

This alias provides a load-balanced login environment, and the assurance that you will be connected to the least loaded login node when you connect to Sherlock.

If, for any reason, you want to connect directly to a specific login node and bypass the automatic load-balanced dispatching of new connections (which we don't recommend), you can use that login node's hostname explicitly. For instance:

$ ssh <sunetid>@ln21.sherlock.stanford.edu
+

This can be useful if you run long-standing processes on the login nodes, such as screen or tmux sessions. To find them again when you reconnect to Sherlock, you will need to log in to the same login node you started them on.
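
For example, a minimal tmux workflow on a specific login node could look like this (the session name is purely illustrative):

$ ssh <sunetid>@ln21.sherlock.stanford.edu
+$ tmux new -s long_task    # start a named session, then detach with Ctrl-b d
+$ tmux attach -t long_task # after reconnecting to ln21, resume where you left off
+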

The drawback is that by connecting to a specific login node, you will forfeit the load-balancing benefits, which could result in a crowded environment, or even in login errors if that specific login node is unavailable.

Authentication methods#

Public-key authentication

SSH public-key authentication is not supported on Sherlock.

The recommended way to authenticate to Sherlock is to simply use your SUNet ID and password, as described in the Connecting page.

Passwords are not stored on Sherlock. Sherlock login nodes delegate password authentication to the University's central Kerberos service.

GSSAPI#

For compatibility with previous generations of Sherlock, GSSAPI¹ authentication is still allowed, and could be considered a more convenient option, as this mechanism doesn't require entering your password for each connection.

GSSAPI authentication relies on a token system, where users obtain Kerberos ticket-granting tickets, transmit them via SSH to the server they want to connect to, which will, in turn, verify their validity. That way, passwords are never stored locally, and never transit over the network. That's why Kerberos is usually considered the most secure method to authenticate.

To connect using GSSAPI on Sherlock, you'll need to go through a few steps²:

  1. make sure the Kerberos user tools are installed on your local machine. You'll need the kinit (and optionally klist and kdestroy) utilities. Please refer to your OS documentation to install them if required.

  2. download and install the Stanford krb5.conf file, which contains information about the Stanford Kerberos environment:

    $ sudo curl -o /etc/krb5.conf https://web.stanford.edu/dept/its/support/kerberos/dist/krb5.conf
    +
  3. configure your SSH client, by modifying (or creating if it doesn't exist already) the .ssh/config file in your home directory on your local machine. Using a text editor, you can add the following lines to your ~/.ssh/config file (indentation is important):

    Host login.sherlock.stanford.edu
    +    GSSAPIDelegateCredentials yes
    +    GSSAPIAuthentication yes
    +

Once everything is in place (you only need to do this once), you'll be able to test that your Kerberos installation works by running kinit <sunetid>@stanford.edu. You should get a password prompt, and upon success, you'll be able to list your Kerberos credentials with the klist command:

$ kinit kilian@stanford.edu
+Password for kilian@stanford.edu:
+$ klist
+Ticket cache: FILE:/tmp/krb5cc_215845_n4S4I6KgyM
+Default principal: kilian@stanford.edu
+
+Valid starting     Expires            Service principal
+07/28/17 17:33:54  07/29/17 18:33:32  krbtgt/stanford.edu@stanford.edu
+        renew until 08/04/17 17:33:32
+

Kerberos ticket expiration

Kerberos tickets have a 25-hour lifetime. So you'll need to run the kinit command pretty much once a day to continue being able to authenticate to Sherlock.

Please note that when your Kerberos ticket expires, existing Sherlock connections will not be interrupted, so you'll be able to keep connections open to Sherlock for several days without any issue.
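
As the klist output above shows a renew until date, tickets are issued as renewable: you can extend a still-valid ticket without re-entering your password, using the standard Kerberos renew option:

$ kinit -R
+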

You're now ready to connect to Sherlock using GSSAPI. Simply SSH as usual:

$ ssh <sunetid>@login.sherlock.stanford.edu
+

and if everything goes well, you should directly see the two-factor (Duo) prompt, without having to enter your password.

If you want to destroy your Kerberos ticket before its expiration, you can use the kdestroy command.
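
For instance:

$ kdestroy
+

After that, klist will no longer show a valid ticket, and new connections will prompt for your password again.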

SSH options#

OpenSSH offers a variety of configuration options that you can use in ~/.ssh/config on your local computer. The following sections describe some of the options you can use with Sherlock that may make connecting and transferring files more convenient.

Avoiding multiple Duo prompts#

In order to avoid getting a second-factor (Duo) prompt every time you want to open a new connection to Sherlock, you can take advantage of the multiplexing features provided by OpenSSH.

Simply add the following lines to your ~/.ssh/config file on your local machine to activate the ControlMaster option. If you already have a Host login.sherlock.stanford.edu block in your configuration file, simply add the Control* option lines in the same block.

Host login.sherlock.stanford.edu
+    ControlMaster auto
+    ControlPath ~/.ssh/%l%r@%h:%p
+

It will allow SSH to re-use an existing connection to Sherlock each time you open a new session (create a new SSH connection), thus avoiding subsequent 2FA prompts once the initial connection is established.
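
To check whether a master connection is currently active, or to close it without waiting for it to time out, you can use OpenSSH's -O control commands (the pid shown below is illustrative):

$ ssh -O check login.sherlock.stanford.edu
+Master running (pid=12345)
+$ ssh -O exit login.sherlock.stanford.edu
+Exit request sent.
+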

The slight disadvantage of this approach is that once you have a connection open to one of Sherlock's login nodes, all your subsequent connections will be using the same login node. This will somewhat defeat the purpose of the load-balancing mechanism used by the login nodes.

Connection failure with unix_listener error

If your connection fails with the following error message:

unix_listener: "..." too long for Unix domain socket
+
you're being hit by a macOS limitation, and you should replace the ControlPath line above with:
ControlPath ~/.ssh/%C
+

Connecting from abroad#

VPN

As a good security practice, we always recommend using the Stanford VPN when connecting from untrusted networks.

Access to Sherlock is not restricted to campus, meaning that you can connect to Sherlock from pretty much anywhere, including when traveling abroad. We don't restrict inbound SSH connections to any specific IP address range or geographical location, so you shouldn't have any issue reaching the login nodes from anywhere.

Regarding two-step authentication, University IT provides alternate authentication options when phone service or Duo Mobile push notifications are not available.


  1. The Generic Security Service Application Program Interface (GSSAPI, also GSS-API) is an application programming interface for programs to access security services. It allows programs to interact with security services such as Kerberos for user authentication. 

  2. Those instructions should work on Linux and macOS computers. For Windows, we recommend using the WSL, as described in the Prerequisites page. 

\ No newline at end of file diff --git a/docs/advanced-topics/job-management/index.html b/docs/advanced-topics/job-management/index.html new file mode 100644 index 000000000..4fa31fef7 --- /dev/null +++ b/docs/advanced-topics/job-management/index.html @@ -0,0 +1,19 @@ + Job management - Sherlock

Job management

Job submission limits#

You may have encountered situations where your jobs get rejected at submission with errors like this:

sbatch: error: MaxSubmitJobsPerAccount
+sbatch: error: MaxSubmitJobsPerUser
+

There are a number of limits on Sherlock, put in place to guarantee that all users get fair access to the resources and a smooth experience while using them. One of those limits caps the total number of jobs a single user (and a single group) can have in queue at any given time. This helps ensure that the scheduler can keep operating in an optimal fashion, without being overloaded by a single user or group.

To see the job submission limits on Sherlock, run the sh_part command.
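
To see how close you are to those limits, you can also count the jobs you currently have in queue, both pending and running (a quick illustration using standard Slurm commands):

$ squeue -u $USER -h -t pending,running | wc -l
+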

To run longer than 2 days on the normal partition, you will need to add the "long" QOS to your submission scripts. For example, to request a 3-day time limit, add the following two lines to your sbatch script:

#SBATCH --time=3-00:00:00
+#SBATCH --qos=long
+

If you have access to an owners partition, you will not need to add this QOS, since the MaxWall on owners is 7 days.

Minimizing the number of jobs in queue#

It's generally a good practice to try reducing the number of jobs submitted to the scheduler, and depending on your workflow, there are various approaches for this. One solution may be to pack more work within a single job, which could help in reducing the overall number of jobs you'll have to submit.

Imagine you have a 100-task array job, where you run 1 app task per array item, which looks like this:

#!/bin/bash
+#SBATCH --array=1-100
+#SBATCH -n 1
+
+./app ${SLURM_ARRAY_TASK_ID}
+

This script would create 100 jobs in queue (even though they would all be regrouped under the same job array), each using 1 CPU to run 1 task.

Instead of that 100-task array job, you can try something like this:

#!/bin/bash
+#SBATCH --array=0-99:10
+#SBATCH -n 10
+
+for i in {0..9}; do
+    srun -n 1 ./app $((SLURM_ARRAY_TASK_ID+i)) &
+done
+
+wait # important to make sure the job doesn't exit before the background tasks are done
+
  • --array=0-99:10 will use job array indexes 0, 10, 20 ... 90
  • -n 10 will make sure each job can be subdivided in 10 1-CPU steps
  • the for loop will launch 10 tasks, with indexes from SLURM_ARRAY_TASK_ID to SLURM_ARRAY_TASK_ID + 9.

This would submit a 10-task array job, each task running 10 steps simultaneously on the 10 CPUs it will be allocated.

In the end, you'll have run the same number of app instances, but you'll have divided the number of jobs submitted by 10, allowing you to submit the same amount of work to the scheduler while staying under the submission limits.
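
As a quick sanity check, you can submit the packed version and list it with squeue's -r/--array option, which expands job arrays one task per line (the script name, job ID and count below are hypothetical):

$ sbatch packed_array.sbatch
+Submitted batch job 45678901
+$ squeue -u $USER -h -r | wc -l
+10
+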

\ No newline at end of file diff --git a/docs/advanced-topics/node-features/index.html b/docs/advanced-topics/node-features/index.html new file mode 100644 index 000000000..1b50b7648 --- /dev/null +++ b/docs/advanced-topics/node-features/index.html @@ -0,0 +1,23 @@ + Node features - Sherlock

Node features

In heterogeneous environments, computing resources are often grouped together into single pools, to make things easier and more accessible. Most applications can run on any type of hardware, so having all resources grouped in the same partitions maximizes utilization and makes job submission much easier, as users don't have dozens of options to choose from.

But for more specific use cases, it may be necessary to specifically select the hardware jobs will run on, either for performance or reproducibility purposes.

To that end, all the compute nodes on Sherlock have feature tags assigned to them. Multiple characteristics are available for each node, such as their class, CPU manufacturer, generation, part number and frequency, as well as Infiniband and GPU characteristics.

Requiring specific node features is generally not necessary

Using node features is an advanced topic that is generally not necessary for running simple jobs on Sherlock. If you're just starting, you most likely don't need to worry about them: they're only useful in very specific cases.

Available features#

The table below lists the possible features defined for each node.

Feature name | Description | Examples
CLASS:xxx    | Node type, as defined in the Sherlock catalog | CLASS:SH3_CBASE, CLASS:SH3_G4TF64
CPU_MNF:xxx  | CPU manufacturer | CPU_MNF:INTEL, CPU_MNF:AMD
CPU_GEN:xxx  | CPU generation | CPU_GEN:RME for AMD Rome; CPU_GEN:SKX for Intel Skylake
CPU_SKU:xxx  | CPU name | CPU_SKU:5118, CPU_SKU:7502P
CPU_FRQ:xxx  | CPU core base frequency | CPU_FRQ:2.50GHz, CPU_FRQ:2.75GHz
GPU_BRD:xxx  | GPU brand | GPU_BRD:GEFORCE, GPU_BRD:TESLA
GPU_GEN:xxx  | GPU generation | GPU_GEN:VLT for Volta; GPU_GEN:AMP for Ampere
GPU_SKU:xxx  | GPU name | GPU_SKU:A100_SXM4, GPU_SKU:RTX_3090
GPU_MEM:xxx  | GPU memory | GPU_MEM:32GB, GPU_MEM:80GB
GPU_CC:xxx   | GPU Compute Capabilities | GPU_CC:6.1, GPU_CC:8.0
IB:xxx       | Infiniband generation/speed | IB:EDR, IB:HDR
NO_GPU       | special tag set on CPU-only nodes |

Listing the features available in a partition#

All the node features available in a partition can be listed with the sh_node_feat command.

For instance, to list all the GPU types in the gpu partition:

$ sh_node_feat -p gpu | grep GPU_SKU
+GPU_SKU:P100_PCIE
+GPU_SKU:P40
+GPU_SKU:RTX_2080Ti
+GPU_SKU:V100_PCIE
+GPU_SKU:V100S_PCIE
+GPU_SKU:V100_SXM2
+

To list all the CPU generations available in the normal partition:

$ sh_node_feat -p normal | grep CPU_GEN
+CPU_GEN:BDW
+CPU_GEN:MLN
+CPU_GEN:RME
+CPU_GEN:SKX
+

Requesting specific node features#

Those node features can be used in job submission options, as additional constraints for the job, so that the scheduler will only select nodes that match the requested features.

Adding job constraints often increases job pending times

It's important to keep in mind that requesting specific node features usually increases job pending times in queue. The more constraints the scheduler has to satisfy, the smaller the pool of compute nodes jobs can run on, and hence the longer it may take for the scheduler to find eligible resources to run those jobs.
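
One way to gauge how restrictive a constraint will be is to count how many nodes in a partition actually provide the feature, for instance with Slurm's sinfo (a sketch using standard Slurm options; sh_node_feat gives similar information):

$ sinfo -p normal --format="%D %f" | grep CPU_GEN:RME
+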

To specify a node feature as a job constraint, the -C/--constraint option can be used.

For instance, to submit a job that should only run on an AMD Rome CPU, you can add the following to your job submission options:

#SBATCH -C CPU_GEN:RME
+

Or to make sure that your training job will run on a GPU with 80GB of GPU memory:

#SBATCH -G 1
+#SBATCH -C GPU_MEM:80GB
+

Multiple constraints#

For more complex cases, multiple constraints could be composed in different ways, using logical operators.

Many node feature combinations are impossible to satisfy

Many combinations will result in impossible conditions, and will make jobs impossible to run on any node. The scheduler is usually able to detect this and reject the job at submission time.

For instance, submitting a job requesting an Intel CPU on the HDR IB fabric:

#SBATCH -C 'CPU_MNF:INTEL&IB:HDR'
+

will result in the following error:

error: Job submit/allocate failed: Requested node configuration is not available
+

as all the compute nodes on the HDR IB fabric use AMD CPUs. Constraints must be used carefully and sparingly to avoid unexpected surprises.

Some of the possible logical operations between constraints are listed below:

AND#

Only nodes with all the requested features are eligible to run the job. The ampersand sign (&) is used as the AND operator. For example:

#SBATCH -C 'GPU_MEM:32GB&IB:HDR'
+

will request a GPU with 32GB of memory on the HDR Infiniband fabric to run the job.

OR#

Only nodes with at least one of the specified features will be eligible to run the job. The pipe sign (|) is used as the OR operator.

In multi-node jobs, it means that nodes allocated to the job may end up having different features. For example, the following options:

#SBATCH -N 2
+#SBATCH -C "CPU_GEN:RME|CPU_GEN:MLN"
+

may result in a two-node job where one node has an AMD Rome CPU, and the other node has an AMD Milan CPU.

Matching OR#

When you need all nodes in a multi-node job to have the same set of features, a matching OR condition can be defined by enclosing the options within square brackets ([,]).

For instance, the following options may be used to request a job to run on nodes with the same CPU core base frequency, either 2.50 GHz or 2.75 GHz:

#SBATCH -C "[CPU_FRQ:2.50GHz|CPU_FRQ:2.75GHz]"
+

Node features are text tags

Node features are text tags: they have no associated numerical value, meaning that they can't be compared or numerically ordered.

For instance, it's not possible to add a constraint for GPU Compute Capabilities greater than 8.0. The workaround is to add a job constraint that satisfies all the possible values of that tag, like:

#SBATCH -C "GPU_CC:8.0|GPU_CC:8.6"
+

For more information, complete details about the --constraint/-C job submission option and its syntax can be found in the official Slurm documentation.

\ No newline at end of file diff --git a/docs/concepts/index.html b/docs/concepts/index.html new file mode 100644 index 000000000..bd5091c8e --- /dev/null +++ b/docs/concepts/index.html @@ -0,0 +1 @@ + Concepts - Sherlock

Concepts

Sherlock, a shared resource#

Sherlock is a shared compute cluster available for use by all Stanford faculty members and their research teams to support departmental or sponsored research.

Sherlock is a resource for research

Sherlock is not suitable for coursework, class assignments or general-use training sessions.

Users interested in using computing resources in such contexts are encouraged to investigate FarmShare, Stanford’s community computing environment, which is primarily intended for supporting coursework.

It is open to the Stanford community as a computing resource to support departmental or sponsored research, and thus a faculty member's sponsorship is required for all user accounts.

Usage policy

Please note that your use of this system falls under the "Computer and Network Usage Policy", as described in the Stanford Administrative Guide. In particular, sharing authentication credentials is strictly prohibited. Violation of this policy will result in termination of access to Sherlock.

Sherlock is designed, deployed, maintained and operated by Stanford Research Computing staff. Stanford Research Computing is a joint effort of the Dean of Research and IT Services to build and support a comprehensive program to advance computational research at Stanford.

Sherlock was initially purchased and supported with seed funding from Stanford's Provost. It comprises a set of freely available compute nodes, a few specific resources such as large-memory machines and GPU servers, as well as the associated networking equipment and storage. These resources can be used to run computational codes and programs, and are managed through a job scheduler using a fair-share algorithm.

Data risk classification#

Low and Moderate Risk data

Sherlock is approved for computing with Low and Moderate Risk data only.

High Risk data

Sherlock is NOT approved to store or process HIPAA, PHI, PII, or any other kind of High Risk data. The system is approved for computing with Low and Moderate Risk data only, and is not suitable for processing High Risk data.

Users are responsible for ensuring the compliance of their own data.

For more information about data risk classifications, see the Information Security Risk Classification page.

Investing in Sherlock#

For users who need more than casual access to a shared computing environment, Sherlock also offers faculty members the possibility of investing in additional, dedicated computing resources.

Unlike traditional clusters, Sherlock is a collaborative system where the majority of nodes are purchased and shared by the cluster users. When a user (typically a PI) purchases one or more nodes, they become an owner. Owners choose from a standard set of server configurations supported by Stanford Research Computing (known as the Sherlock catalog) to add to the cluster.

When they're not in use, PI-purchased compute nodes can be used by other owners. This model also allows Sherlock owners to benefit from the scale of the cluster by giving them access to more compute nodes than their individual purchase, which gives them much greater flexibility than owning a standalone cluster.

The majority of Sherlock nodes are owners nodes

The vast majority of Sherlock's compute nodes have been purchased by individual PIs and groups, and PI purchases are the main driver behind the rapid expansion of the cluster, which went from 120 nodes to more than 1,000 nodes in less than 3 years.

The resource scheduler configuration works like this:

  • owners and their research teams get immediate and exclusive access to the resources they purchased,
  • when those nodes are idle, other owners can use them,
  • when the purchasing owners want to use their resources, jobs from other owners that may be running on them are preempted (i.e. killed and re-queued).

This provides a way to get more resources to run less important jobs in the background, while making sure that owners always get immediate access to their own nodes.

Participating owners also have shared access to the public, shared Sherlock nodes, along with everyone else.

Benefits#

Benefits to owners include:

  • no wait time in queue: immediate and exclusive access to the purchased nodes
  • access to more resources: possibility to submit jobs to the other owners' nodes when they're not in use

Compared to hosting and managing computing resources on your own, purchasing nodes on Sherlock provides:

  • data center hosting, including backup power and cooling
  • system configuration, maintenance and administration
  • hardware diagnostics and repairs

Those benefits come in addition to the other Sherlock advantages:

  • access to high-performance, large parallel scratch storage space
  • access to snapshotted, replicated, enterprise-class storage space
  • optimized software stack, especially tailored for a range of research needs
  • tools to build and install additional software applications as needed
  • user support

Limitations#

Purchasing nodes on Sherlock is different from traditional server hosting.

In particular, purchasing your own compute nodes on Sherlock will NOT allow:

root access: owner nodes on Sherlock are still managed by Stanford Research Computing staff in accordance with Stanford's Minimum Security Standards. Although users are welcome to install (or request) any software they may need, purchasing compute nodes on Sherlock does not allow root access to the nodes.

running permanent services: permanent processes such as web servers or databases can only run on owner nodes through the scheduler, using recurring or persistent jobs. Purchasing compute nodes on Sherlock does not provide a way to run anything that couldn't run on freely-available nodes.

direct network connectivity: owners' nodes are connected to Sherlock's internal network and are not directly accessible from the outside, which means that they can't host public services like web or application servers.

bypassing the scheduler: jobs running on owners' nodes still need to be submitted to the scheduler. Direct shell access to the nodes is not possible outside of scheduled interactive sessions.

hardware changes: the hardware components of purchased nodes cannot be modified, removed, swapped or upgraded during the nodes' service lifetime.

configuration: the configuration of purchased nodes is tuned to provide optimal performance over a majority of use cases and applications, is identical on all nodes across the cluster, and cannot be changed, modified or altered in any way.

persistent local storage: local storage space provided on the compute nodes is only usable for the duration of a job and cannot be used to store long-term data.

additional storage space: purchasing compute nodes on Sherlock does not provide additional storage space. Please note that Stanford Research Computing does offer the possibility for PIs to purchase their own storage space on Oak, for their long-term research data needs.

Purchasing nodes#

If you are interested in becoming an owner, you can find the latest information about ordering Sherlock nodes on the ordering page. Feel free to contact us if you have any additional questions.

Cluster generations#

The research computing landscape evolves very quickly, and to accommodate both growth and technological advances, it's necessary to adapt the Sherlock environment to these evolutions.

Every year or so, a new generation of processors is released, which is why, over a span of several years, multiple generations of CPUs and GPUs make their way into Sherlock. This provides users with access to the latest features and performance enhancements, but it also adds some heterogeneity to the cluster, which is important to keep in mind when compiling software and requesting resources to run them.

Another key component of Sherlock is the interconnect network that links all of Sherlock's compute nodes together and acts as a backbone for the whole cluster. This network fabric is of finite capacity: based on the characteristics of the individual networking switches and on typical research computing workflows, it can accommodate up to about 850 compute nodes.

As nodes get added to Sherlock, the number of available ports decreases, and at some point, the fabric gets full and no more nodes can be added. Sherlock reached that stage for the first time in late 2016, which prompted the installation of a whole new fabric, to allow for further system expansion.

This kind of evolution is the perfect opportunity to upgrade other components too: management software, ancillary services architecture and user applications. In January 2017, those components were completely overhauled and a new, completely separate cluster was kick-started, using a different set of hardware and software, while conserving the same storage infrastructure, to ease the transition process.

After a transition period, the older Sherlock hardware, compute and login nodes, have been merged into the new cluster, and from a logical perspective (connection, job scheduling and computing resources), nodes attached to each of the fabrics have been reunited to form a single cluster again.

As Sherlock continues to evolve and grow, the new fabric will in turn approach capacity, and the same process will be repeated to start the next generation of Sherlock.

Maintenances and upgrades#

Stanford Research Computing institutes a monthly scheduled maintenance window on Sherlock, to ensure optimal operation, avoid potential issues and prepare for future expansions. This window will be used to make hardware repairs, software and firmware updates, and perform general manufacturer recommended maintenance on our environment.

As often as possible, maintenance tasks are performed in a rolling, non-disruptive fashion, but downtimes are sometimes an unfortunate necessity to allow disruptive operations that can't be conducted while users are working on the system.

Maintenance schedule

As often as possible, maintenances will take place on the first Tuesday of every month, from 08:00 to 12:00 (noon) Pacific time, and will be announced 2 weeks in advance through the usual communication channels.

In case an exceptional amount of work is required, the maintenance window could be extended to 10 hours (from 08:00 to 18:00).

During these times, access to Sherlock will be unavailable: logins will be disabled and jobs won't run. A reservation will be placed in the scheduler so running jobs can finish before the maintenance starts, and jobs that wouldn't finish in time will be held until after the maintenance window.

Common questions#

Q: Why do maintenance at all?

A: Due to the scale of our computing environment and the increasing complexity of the systems we deploy, it is prudent to arrange for a regular time when we can comfortably and without pressure fix problems or update facilities with minimal impact to our customers. Most, if not all, major HPC centers have regular maintenance schedules. We also need to enforce the Minimum Security rules instituted by the Stanford Information Security Office, which mandate deployment of security patches in a timely manner.

Q: Why Tuesdays 08:00-12:00? Why not do this late at night?

A: We have observed that the least busy time for our services is at the beginning of the week, in the morning hours. Using this time period should not interrupt most of our users. In the remote case a problem extends past the scheduled downtime, we would have our full staff fresh and available to assist in repairs and quickly restore service.

Q: I have jobs running, what will happen to them?

A: For long-running jobs, we strongly recommend checkpointing your results on a periodic basis. In addition, we place a reservation in the scheduler for each maintenance, which prevents jobs from running past it. This means that the scheduler will only allow jobs to run if they can finish by the time the maintenance starts. If you submit a long job shortly before the maintenance, it will be delayed until after it. This ensures that no work is lost when the maintenance starts.
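
For instance, a job held back by a maintenance reservation would show a pending reason along these lines in squeue (a hypothetical, abridged illustration):

$ squeue -u $USER
+     JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+      4242    normal     test     jdoe PD       0:00      1 (ReqNodeNotAvail, Reserved for maintenance)
+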

\ No newline at end of file diff --git a/docs/credits/index.html b/docs/credits/index.html new file mode 100644 index 000000000..e20c326f1 --- /dev/null +++ b/docs/credits/index.html @@ -0,0 +1,10 @@ + Credits - Sherlock

About us#

Stanford Research Computing#

logo

Stanford Research Computing is a joint effort of the Dean of Research and IT Services to build and support a comprehensive program to advance computational research at Stanford. That includes offering and supporting traditional high performance computing (HPC) systems, as well as systems for high throughput and data-intensive computing.

The Stanford Research Computing team also helps researchers transition their analyses and models from the desktop to more capable and plentiful resources, providing the opportunity to explore their data and answer research questions at a scale typically not possible on desktops or departmental servers. Partnering with national initiatives and programs as well as vendors, Stanford Research Computing offers training and learning opportunities around HPC tools and technologies.

For more information, please see the Stanford Research Computing website.

Credits#

We would like to thank the following companies for their generous sponsorship, and for providing services and resources that help us manage Sherlock every day:

The Sherlock website and documentation also rely on the following projects:

Why the Sherlock name?#

If you're curious about where the Sherlock name came from, we always considered that computing resources in general and HPC clusters in particular should be the catalyst of innovation, be ahead of their time, and spur new discoveries.

And what better account of what's happening on a high-performance computing cluster than Benedict Cumberbatch describing his role as Sherlock Holmes in the BBC's modern adaptation of Arthur Conan Doyle's classic?

Benedict Cumberbatch, about Sherlock

There's a great charge you get from playing him, because of the volume of words in your head and the speed of thought – you really have to make your connections incredibly fast. He is one step ahead of the audience, and of anyone around him with normal intellect. They can't quite fathom where his leaps are taking him.

Yes, exactly. That's Sherlock.

Sherlock, of HBO fame#

And finally, we couldn't resist the pleasure of citing the most prestigious accomplishment of Sherlock to date: a mention in HBO's Silicon Valley Season 4 finale!


Yep, you got that right, Richard Hendricks wanted to use our very own Sherlock!

Kudos to the show's crew and a big thank you to HBO's data compression stars, Professor Tsachy Weissman and Dmitri Pavlichin, for this incredible Sherlock shout-out. This has been an everlasting source of pride and amazement for the whole SRCC team! ❤

\ No newline at end of file diff --git a/docs/getting-started/connecting/index.html b/docs/getting-started/connecting/index.html new file mode 100644 index 000000000..be9414576 --- /dev/null +++ b/docs/getting-started/connecting/index.html @@ -0,0 +1,70 @@ + Connecting - Sherlock

Connecting to Sherlock#

Sherlock account required

To be able to connect to Sherlock, you must first obtain a Sherlock account.

Credentials#

All users must have a Stanford SUNet ID and a Sherlock account to log in to Sherlock. Your Sherlock account uses the same username/password as your SUNet ID:

Username: SUNet ID
+Password: SUNet ID password
+

To request a Sherlock account, please see the Prerequisites page.

Resetting passwords

Sherlock does not store your SUNet ID password. As a consequence, we are unable to reset your password. If you require password assistance, please see the SUNet Account page.

Connection#

Access to Sherlock is provided via Secure Shell (SSH) login. Most Unix-like operating systems provide an SSH client by default that can be accessed by typing the ssh command in a terminal window.

To log in to Sherlock, open a terminal and type the following command, where <sunetid> should be replaced by your actual SUNet ID:

$ ssh <sunetid>@login.sherlock.stanford.edu
+

Upon logging in, you will be connected to one of Sherlock's load-balanced login nodes. You should be automatically directed to the least-loaded login node at the moment of your connection, which should give you the best possible environment to work in.

Host keys#

Upon your very first connection to Sherlock, you will be greeted by a warning such as:

The authenticity of host 'login.sherlock.stanford.edu' can't be established.
+ECDSA key fingerprint is SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg.
+Are you sure you want to continue connecting (yes/no)?
+

The same warning will be displayed if you try to connect to one of the Data Transfer Nodes (DTNs):

The authenticity of host 'dtn.sherlock.stanford.edu' can't be established.
+ECDSA key fingerprint is SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg.
+Are you sure you want to continue connecting (yes/no)?
+

This warning is normal: your SSH client warns you that it is the first time it sees this computer. To make sure you are actually connecting to the right machine, you should compare the ECDSA key fingerprint shown in the message with one of the fingerprints below:

Key type  Key Fingerprint
RSA       SHA256:T1q1Tbq8k5XBD5PIxvlCfTxNMi1ORWwKNRPeZPXUfJA
          (legacy format: f5:8f:01:46:d1:f9:66:5d:33:58:b4:82:d8:4a:34:41)
ECDSA     SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg
          (legacy format: 70:4c:76:ea:ae:b2:0f:81:4b:9c:c6:5a:52:4c:7f:64)

If they match, you can proceed and type ‘yes’. Your SSH program will then store that key and will verify it for every subsequent SSH connection, to make sure that the server you're connecting to is indeed Sherlock.

Host keys warning#

If you've connected to Sherlock 1.0 before, there's a good chance the Sherlock 1.0 keys were stored by your local SSH client. In that case, when connecting to Sherlock 2.0 using the sherlock.stanford.edu alias, you will be presented with the following message:

@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ WARNING: POSSIBLE DNS SPOOFING DETECTED! @
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+The RSA host key for sherlock.stanford.edu has changed, and the key for
+the corresponding IP address 171.66.97.101 is unknown. This could
+either mean that DNS SPOOFING is happening or the IP address for the
+host and its host key have changed at the same time.
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
+Someone could be eavesdropping on you right now (man-in-the-middle
+attack)!  It is also possible that a host key has just been changed.
+The fingerprint for the RSA key sent by the remote host is
+SHA256:T1q1Tbq8k5XBD5PIxvlCfTxNMi1ORWwKNRPeZPXUfJA.
+Please contact your system administrator.
+

You can just check that the SHA256 key listed in that warning message correctly matches the one listed in the table above, and if that's the case, you can safely remove the sherlock.stanford.edu entry from your ~/.ssh/known_hosts file with the following command on your local machine:

$ ssh-keygen -R sherlock.stanford.edu
+

and then connect again. You'll see the first-connection prompt mentioned above, and your SSH client will store the new keys for future connections.

Authentication#

Password#

To ease access and increase compatibility1 with different platforms, Sherlock allows a simple password-based authentication mechanism for SSH.2

Upon connection, you will be asked for your SUNet ID password with the following prompt:

<sunetid>@login.sherlock.stanford.edu's password:
+

Enter your password, and if it's correct, you should see the following line:

Authenticated with partial success.
+

Second factor (2FA)#

Sherlock implements Stanford's Minimum Security Standards policies which mandate two-step authentication to access the cluster.

Two-step authentication protects your personal information and credentials by combining something only you know (your password) with something only you have (your phone, tablet or token). This prevents an attacker who steals your password from actually using it to impersonate you. For more details about two-step authentication at Stanford, please refer to the University IT two-step page.

After successfully entering your password, you'll be prompted for your second authentication factor with a message like this:

Duo two-factor login for <sunetid>
+
+Enter a passcode or select one of the following options:
+
+ 1. Duo Push to XXX-XXX-9999
+ 2. Phone call to XXX-XXX-9999
+ 3. SMS passcodes to XXX-XXX-9999 (next code starts with: 9)
+
+Passcode or option (1-3):
+

Avoiding two-factor prompt on each connection

If you routinely open multiple sessions to Sherlock, having to confirm each one of them with a second authentication factor could rapidly become cumbersome. To work around this, the OpenSSH client allows multiplexing channels and re-using existing authenticated connections to open new sessions. Please see the Advanced Connection Options page for more details.
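
As a minimal sketch, such a configuration in your local ~/.ssh/config could look like this (the ControlPath socket location and ControlPersist duration are just examples):

Host login.sherlock.stanford.edu
+    # re-use an existing authenticated connection for new sessions
+    ControlMaster auto
+    # where to store the control socket on your local machine
+    ControlPath ~/.ssh/cm-%r@%h:%p
+    # keep the master connection open for 10 minutes after the last session
+    ControlPersist 10m
+

With this in place, the first connection authenticates with your password and second factor, and subsequent sessions re-use the established connection without prompting again.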

If your second factor is accepted, you'll see the following message:

Success. Logging you in...
+

Troubleshooting#

Timeouts#

If you ever encounter timeout errors when connecting to Sherlock, like these:

$ ssh login.sherlock.stanford.edu
+ssh: connect to host login.sherlock.stanford.edu port 22: Operation timed out
+

you can try to either:

  • switch to a wired connection if you're connecting over wifi,
  • connect via the Stanford VPN

Authentication failures#

Excessive authentication failures

Entering an invalid password multiple times will result in a (temporary) ban of your IP address.

To prevent brute-force password guessing attacks on Sherlock login nodes, we automatically block IP addresses that generate too many authentication failures in a given time span. This results in a temporary ban of the infringing IP address, making it impossible for the user to connect to Sherlock from that IP address.

When this happens, your SSH connection attempts will result in the following error:

ssh: connect to host login.sherlock.stanford.edu port 22: Connection refused
+

IP addresses blocked by this mechanism will automatically be authorized again after a few minutes.

SSHFS on macOS

SSHFS on macOS is known to try to automatically reconnect filesystem mounts after resuming from sleep or suspend, even without any valid credentials. As a result, it will generate a lot of failed connection attempts and likely get your IP address blocked on login nodes.

Make sure to unmount your SSHFS drives before putting your macOS system to sleep to avoid this situation.
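
For instance, assuming a hypothetical mount point at ~/sherlock_mount, you would run this on your local machine before closing the lid:

$ umount ~/sherlock_mount
+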

VPN

If your IP address has been blocked and you have an urgent need to connect before the automatic ban expires, we recommend trying to connect through Stanford's VPN: your computer will then use a different IP address and will not be affected by the ban on your regular IP address.

Login#

Congratulations! You've successfully connected to Sherlock. You'll be greeted by the following message of the day:

             --*-*- Stanford Research Computing Center -*-*--
+                  ____  _               _            _
+                 / ___|| |__   ___ _ __| | ___   ___| | __
+                 \___ \| '_ \ / _ \ '__| |/ _ \ / __| |/ /
+                  ___) | | | |  __/ |  | | (_) | (__|   <
+                 |____/|_| |_|\___|_|  |_|\___/ \___|_|\_\
+
+-----------------------------------------------------------------------------
+  This system is for authorized users only and users must comply with all
+  Stanford computing, network and research policies. All activity may be
+  recorded for security and monitoring purposes. For more information, see
+  https://doresearch.stanford.edu/policies/research-policy-handbook and
+  https://adminguide.stanford.edu/chapter-6/subchapter-2/policy-6-2-1
+-----------------------------------------------------------------------------
+  Sherlock is *NOT* approved for storing or processing HIPAA, PHI, PII nor
+  any kind of High Risk data. Users are responsible for the compliance of
+  their data.
+  See https://uit.stanford.edu/guide/riskclassifications for details.
+-----------------------------------------------------------------------------
+
+        Docs         https://www.sherlock.stanford.edu/docs
+        Support      https://www.sherlock.stanford.edu/docs/#support
+
+        Web          https://www.sherlock.stanford.edu
+        News         https://news.sherlock.stanford.edu
+        Status       https://status.sherlock.stanford.edu
+
+-----------------------------------------------------------------------------
+

Once authenticated to Sherlock, you'll see the following prompt:

[<sunetid>@sh03-ln01 login! ~]$

It indicates the name of the login node you've been connected to, and serves as a reminder that you're actually on a login node, not a compute node.

Login nodes are not for computing

Login nodes are shared among many users and therefore must not be used to run computationally intensive tasks. Those should be submitted to the scheduler which will dispatch them on compute nodes.

By contrast, the shell prompt on a compute node looks like this:

[<sunetid>@sh03-01n01 ~]$

Start computing#

To start computing, there's still an extra step required: requesting resources to run your application. It's all described in the next section.


  1. On Sherlock 1.0, GSSAPI tokens (based on Kerberos tickets) were the only allowed authentication method, which could cause some interoperability issues with third-party SSH clients. 

  2. For other methods of authentication, see the Advanced Connection Options page. 

\ No newline at end of file diff --git a/docs/getting-started/index.html b/docs/getting-started/index.html new file mode 100644 index 000000000..55f3f2be8 --- /dev/null +++ b/docs/getting-started/index.html @@ -0,0 +1 @@ + Getting started - Sherlock

Getting started#

Prerequisites#

To start using Sherlock, you will need:

  • an active SUNet ID,

    What is a SUNet ID?

    A SUNet ID is a unique 3-8 character account name that identifies you as a member of the Stanford community, with access to the Stanford University Network of computing resources and services. Not to be confused with your University ID (an 8-digit number that appears on your Stanford ID Card), your SUNet ID is a permanent and visible part of your Stanford identity and often appears in your Stanford email address (e.g. sunetid@stanford.edu).

    SUNet IDs are not managed by Research Computing. For more information, see https://accounts.stanford.edu/

    SUNet ID service levels and external collaborators

    Base-level service is sufficient for Sherlock accounts. External collaborators, or users without a SUNet ID, can be sponsored by a PI to get a sponsored SUNet ID at no cost. Please see the sponsorship page for more information.

  • a Sherlock account,

  • an SSH client,
  • a good understanding of the concepts and terms used throughout this documentation,
  • some familiarity with Unix/Linux command-line environments, and notions of shell scripting.

How to request an account#

To request an account, the sponsoring Stanford faculty member should email srcc-support@stanford.edu, specifying the names and SUNet IDs of the research team members needing an account.

Sherlock is open to the Stanford community as a computing resource to support departmental or sponsored research, so a faculty member's explicit consent is required for account requests.

Sherlock is a resource for research

Sherlock is a resource to help and support research, and is not suitable for course work, class assignments or general-use training sessions.

There is no fee associated with using Sherlock, and no limit on the number of accounts each faculty member can request. We will periodically ensure that all accounts associated with each PI are still active, and we reserve the right to close any Sherlock account whose SUNet ID has expired.

SSH clients#

Linux#

Linux distributions usually come with a version of the OpenSSH client already installed, so no additional software installation is required. If not, please refer to your distribution's documentation to install it.

macOS#

macOS systems usually come with a version of the OpenSSH client already installed, so no additional software installation is required.

Windows#

Microsoft Windows includes an SSH client by default, which can be used to connect to Sherlock from a Windows terminal.

Windows also has a feature called the "Windows Subsystem for Linux" (WSL), which provides a Linux-like experience and makes switching across systems more seamless. Please refer to the official documentation or this HOWTO for installation instructions.

The two options above will ensure the best compatibility with the Sherlock environment. If you'd like to explore other avenues, many other SSH client implementations are available, but have not necessarily been tested with Sherlock, so your mileage may vary.

Unix/Linux resources#

A full tutorial on using Unix/Linux is beyond the scope of this documentation. However, there are many tutorials for beginning to use Unix/Linux on the web.

A few tutorials we recommend are:

More specifically about HPC and Research Computing:

Text editors#

Multiple text editors are available on Sherlock. For beginners, we recommend the use of nano. For more advanced use, you'll also find below some resources about using vim.

Note: you can also create/edit files with the Sherlock OnDemand File editor.

Shell scripting#

Compute jobs launched on Sherlock are most often initialized by user-written shell scripts. Beyond that, many common operations can be simplified and automated using shell scripts.
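
As a minimal illustration (the file names are hypothetical), the following script renames all .dat files in the current directory to .txt:

rename.sh
#!/bin/bash
+# loop over all .dat files in the current directory
+for f in *.dat; do
+    # strip the .dat suffix and append .txt instead
+    mv "$f" "${f%.dat}.txt"
+done
+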

For an introduction to shell scripting, you can refer to:

\ No newline at end of file diff --git a/docs/getting-started/prerequisites/index.html b/docs/getting-started/prerequisites/index.html new file mode 100644 index 000000000..e0c38c74d --- /dev/null +++ b/docs/getting-started/prerequisites/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/getting-started/submitting/index.html b/docs/getting-started/submitting/index.html new file mode 100644 index 000000000..fe5ffe216 --- /dev/null +++ b/docs/getting-started/submitting/index.html @@ -0,0 +1,26 @@ + Submitting jobs - Sherlock

Submitting jobs#

Principle#

Login nodes are not for computing

Login nodes are shared among many users and therefore must not be used to run computationally intensive tasks. Those should be submitted to the scheduler which will dispatch them on compute nodes.

Requesting resources#

A mandatory prerequisite for running computational tasks on Sherlock is to request computing resources. This is done via a resource scheduler, whose very purpose is to match compute resources in the cluster (CPUs, GPUs, memory, ...) with user resource requests.

The scheduler provides three key functions:

  1. it allocates access to resources (compute nodes) to users for some duration of time so they can perform work.
  2. it provides a framework for starting, executing, and monitoring work (typically a parallel job such as MPI) on a set of allocated nodes.
  3. it arbitrates contention for resources by managing a queue of pending jobs.

Slurm#

Sherlock uses Slurm, an open-source resource manager and job scheduler, used by many of the world's supercomputers and computer clusters.

Slurm supports a variety of job submission techniques. By accurately requesting the resources you need, you’ll be able to get your work done.

Wait times in queue

As a quick rule of thumb, it's important to keep in mind that the more resources your job requests (CPUs, GPUs, memory, nodes, and time), the longer it may have to wait in queue before it can start.

In other words: accurately requesting resources to match your job's needs will minimize your wait times.

How to submit a job#

A job consists of two parts: resource requests and job steps.

Resource requests describe the amount of computing resource (CPUs, GPUs, memory, expected run time, etc.) that the job will need to successfully run.

Job steps describe tasks that must be executed.

Batch scripts#

The typical way of creating a job is to write a job submission script. A submission script is a shell script (e.g. a Bash script) whose first comments, if they are prefixed with #SBATCH, are interpreted by Slurm as parameters describing resource requests and submission options1.

The submission script itself is a job step. Other job steps are created with the srun command.

For instance, the following script would request one task with one CPU for 10 minutes, along with 2 GB of memory, in the default partition:

submit.sh
#!/bin/bash
+#
+#SBATCH --job-name=test
+#
+#SBATCH --time=10:00
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=1
+#SBATCH --mem-per-cpu=2G
+
+srun hostname
+srun sleep 60
+

When started, the job will first run the job step srun hostname, which launches the hostname command on the node where the requested CPU was allocated. Then, a second job step will start the sleep command.

You can create this job submission script on Sherlock using a text editor such as nano or vim, and save it as submit.sh.

#SBATCH directives syntax

#SBATCH directives must be at the top of the script

Slurm will ignore all #SBATCH directives after the first non-comment line (that is, the first line in the script that doesn't start with a # character). Always put your #SBATCH parameters at the top of your batch script.

Spaces in parameters will cause #SBATCH directives to be ignored

Slurm will ignore all #SBATCH directives after the first white space. For instance, directives like these:

#SBATCH --job-name=big job
+
#SBATCH --mem=16 G
+
#SBATCH --partition=normal, owners
+
will cause all following #SBATCH directives to be ignored and the job to be submitted with the default parameters.
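
For reference, here is a sketch of well-formed equivalents (the values are illustrative): quotes protect spaces in values, and units or partition lists are written without spaces:

#SBATCH --job-name="big job"
+#SBATCH --mem=16G
+#SBATCH --partition=normal,owners
+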

Job submission#

Once the submission script is written properly, you can submit it to the scheduler with the sbatch command. Upon success, sbatch will return the ID it has assigned to the job (the jobid).

$ sbatch submit.sh
+Submitted batch job 1377
+

Check the job#

Once submitted, the job enters the queue in the PENDING state. When resources become available and the job has sufficient priority, an allocation is created for it and it moves to the RUNNING state. If the job completes correctly, it goes to the COMPLETED state, otherwise, its state is set to FAILED.

You'll be able to check the status of your job and follow its evolution with the squeue -u $USER command:

$ squeue -u $USER
+     JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+      1377    normal     test   kilian  R       0:12      1 sh02-01n01
+

The scheduler will automatically create an output file containing the result of the commands run in the script file. That output file is named slurm-<jobid>.out by default, but can be customized via submission options. In the above example, you can list the contents of that output file with the following command:

$ cat slurm-1377.out
+sh02-01n01
+
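
For instance, the output file name can be customized in the submission script with the --output option, where the %j pattern is replaced by the job ID (the file name shown is just an example):

#SBATCH --output=test.%j.out
+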

Congratulations, you've submitted your first batch job on Sherlock!

What's next?#

Actually, quite a lot. Although you now know how to submit a simple batch job, there are many other options and areas to explore in the next sections:


  1. You can get the complete list of parameters by referring to the sbatch manual page (man sbatch). 

\ No newline at end of file diff --git a/docs/glossary/index.html b/docs/glossary/index.html new file mode 100644 index 000000000..0260e57e3 --- /dev/null +++ b/docs/glossary/index.html @@ -0,0 +1 @@ + Glossary - Sherlock

Glossary#

What's a cluster?#

A computing cluster is a federation of multiple compute nodes (independent computers), most commonly linked together through a high-performance interconnect network.

What makes it a "super-computer" is the ability for a program to address resources (such as memory, CPU cores) located in different compute nodes, through the high-performance interconnect network.

overview

On a computing cluster, users typically connect to login nodes, using a secure remote login protocol such as SSH. Unlike in traditional interactive environments, users then need to prepare compute jobs to submit to a resource scheduler. Based on a set of rules and limits, the scheduler will then try to match the jobs' resource requirements with available resources such as CPUs, memory or computing accelerators such as GPUs. It will then execute the user-defined tasks on the selected resources, and generate output files in one of the different storage locations available on the cluster, for the user to review and analyze.

Cluster components#

The terms typically used to describe cluster components can be confusing, so in an effort to clarify things, here's a schema of the most important ones, and their definitions.

components

CPU#

A Central Processing Unit (CPU), or core, or CPU core, is the smallest unit in a microprocessor that can carry out computational tasks, that is, run programs. Modern processors typically have multiple cores.

Socket#

A socket is the connector that houses the microprocessor. By extension, it represents the physical package of a processor, that typically contains multiple cores.

Node#

A node is a physical, stand-alone computer, that can handle computing tasks and run jobs. It's connected to other compute nodes via a fast network interconnect, and contains CPUs, memory and devices managed by an operating system.

Cluster#

A cluster is the complete collection of nodes with networking and file storage facilities. It's usually a group of independent computers connected via a fast network interconnect, managed by a resource manager, which acts as a large parallel computer.

Other commonly used terms#

To make this documentation more accessible, we try to explain key terms in a non-technical way. When reading these pages, please keep in mind the following definitions, presented in alphabetical order:

Application#

An application is a computer program designed to perform a group of coordinated functions, tasks, or activities for the benefit of the user. In the context of scientific computing, an application typically performs computations related to a scientific goal (molecular dynamics simulations, genome assembly, computational fluid dynamics simulations, etc.).

Backfill#

Backfill scheduling is a method that a scheduler can use in order to maximize utilization. It allows smaller (both in terms of size and time requirements), lower priority jobs to start before larger, higher priority ones, as long as doing so doesn't push back the higher-priority jobs' expected start times.

Executable#

A binary (or executable) program refers to the machine-code, compiled version of an application: a binary file that a computer can execute directly. This is opposed to the application's source code, the human-readable version of the application's internal instructions, which needs to be compiled by a compiler to produce the executable binary.

Fairshare#

A resource scheduler ranks jobs by priority for execution. Each job's priority in queue is determined by multiple factors, one of which is the user's fairshare score. A user's fairshare score is computed based on a target (the portion of the resources that this user should be able to use) and the user's effective usage, i.e. the amount of resources they actually used in the past. As a result, the more resources past jobs have used, the lower the priority of the next jobs will be. Past usage is computed based on a sliding window and is progressively forgotten over time. This enables all users of a shared resource to get a fair portion of it for their own use, by giving higher priority to users who have been underserved in the past.

FLOPS#

Floating-point Operations Per Second (FLOPS) are a measure of computing performance, and represent the number of floating-point operations that a CPU can perform each second. Modern CPUs and GPUs are capable of doing TeraFLOPS (10^12 floating-point operations per second), depending on the precision of those operations (half-precision: 16 bits, single-precision: 32 bits, double-precision: 64 bits).
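
As a hypothetical back-of-the-envelope illustration: a 32-core CPU running at 2.5 GHz, with each core able to perform 16 double-precision floating-point operations per cycle, would peak at 32 × 2.5×10⁹ × 16 = 1.28 TeraFLOPS.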

GPU#

A Graphical Processing Unit (GPU) is a specialized device initially designed to generate graphical output. On modern computing architectures, GPUs are used to accelerate certain types of computation, at which they are much faster than CPUs. GPUs have their own memory, and are attached to CPUs within a node. Each compute node can host one or more GPUs.

HPC#

High Performance Computing (HPC) refers to the practice of aggregating computing power to achieve higher performance than would be possible by using a typical computer.

Infiniband#

Infiniband is a networking standard that features high bandwidth and low latency. The current Infiniband devices are capable of transferring data at up to 200 Gbits/sec with less than a microsecond of latency. As of this writing, the popular Infiniband versions are HDR (High Data Rate) at 200 Gbits/sec and EDR (Enhanced Data Rate) at 100 Gbits/sec.

IOPS#

Input/output operations per second (IOPS, pronounced eye-ops) is an input/output performance measurement used to characterize computer storage system performance.

Job#

A job, or batch job, is the scheduler’s base unit of computing by which resources are allocated to a user for a specified amount of time. Users create job submission scripts to ask the scheduler for resources such as cores, memory, runtime, etc. The scheduler puts the requests in a queue and allocates requested resources based on jobs’ priority.

Job step#

Job steps are sets of (possibly parallel) tasks within a job.

Login nodes#

Login nodes are points of access to a compute cluster. Users usually connect to login nodes via SSH to compile and debug their code, review their results, do some simple tests, and submit their batch jobs to the parallel computer.

Login nodes are not for computing

Login nodes are usually shared among many users and therefore must not be used to run computationally intensive tasks. Those should be submitted to the scheduler which will dispatch them on compute nodes.

Modules#

Environment modules, or software modules, are a type of software management tool used in most HPC environments. Using modules enables users to selectively pick the software they want to use and add it to their environment. This allows users to switch between different versions or flavors of the same software, pick compilers, libraries and software components, and avoid conflicts between them.
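
As an illustration, a typical interaction with modules might look like this (the python module is just an example; available module names depend on the cluster's software stack):

$ module avail          # list the software modules available on the system
+$ module load python    # add python to the current environment
+$ module list           # show the modules currently loaded
+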

MPI#

Message Passing Interface (MPI) is a standardized and portable message-passing system designed to exchange information between processes running on different nodes. There are several implementations of the MPI standard, which is the most common way used to scale parallel applications beyond a single compute node.

OpenMP#

Open Multi Processing (OpenMP) is a parallel programming model designed for shared-memory architectures. It's based on pragmas that can be added to applications to let the compiler generate code that can run on multiple cores, within the same node.

Partition#

A partition is a set of compute nodes within a cluster that share a common feature. For example, compute nodes with GPUs, or compute nodes belonging to the same owner, could form a partition.

On Sherlock, you can see detailed partition information with the sh_part or sinfo commands.
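
For example, sinfo can summarize the state of a partition's nodes; a hypothetical, abridged output could look like this (node counts and names are illustrative):

$ sinfo --partition=normal
+PARTITION AVAIL  TIMELIMIT  NODES  STATE NODELIST
+normal*      up 7-00:00:00     24   idle sh02-01n[01-24]
+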

QOS#

A Quality Of Service (QOS) is the set of rules and limitations that apply to a category of jobs. The combination of a partition (the set of machines where a job can run) and a QOS (the set of rules that applies to that job) makes what is often referred to as a scheduler queue.

Run time#

The run time, or walltime, of a job is the time required to finish its execution.

Scheduler#

The goal of a job scheduler is to find the appropriate resources to run a set of computational tasks in the most efficient manner. Based on resource requirements and job descriptions, it will prioritize those jobs, allocate resources (nodes, CPUs, memory) and schedule their execution.

Slurm#

Simple Linux Utility for Resource Management (SLURM) is software that manages computing resources and schedules tasks on them. Slurm coordinates the running of many programs on a shared facility and makes sure that resources are used in an optimal manner.

SSH#

Secure Shell (SSH) is a protocol to securely access remote computers. Based on the client-server model, multiple users with an SSH client can access a remote computer. Some operating systems such as Linux and macOS have a built-in SSH client, and others can use one of many publicly available clients.

Thread#

A process, in the simplest terms, is an executing program. One or more threads run in the context of the process. A thread is the basic unit to which the operating system allocates processor time. A thread can execute any part of the process code, including parts currently being executed by another thread. Threads are co-located on the same node.

Task#

In the Slurm context, a task is to be understood as a process. A multi-process program is made of several tasks. A task is typically used to schedule an MPI process, which in turn can use several CPUs. By contrast, a multi-threaded program is composed of only one task, which uses several CPUs.
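
To make the distinction concrete, here's how the two models would typically translate into resource requests (a sketch contrasting two separate jobs, not one script):

# an MPI program with 4 ranks: 4 tasks, 1 CPU each
+#SBATCH --ntasks=4
+#SBATCH --cpus-per-task=1
+
+# an OpenMP program with 4 threads: 1 task using 4 CPUs
+#SBATCH --ntasks=1
+#SBATCH --cpus-per-task=4
+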
\ No newline at end of file diff --git a/docs/images/bighead.png b/docs/images/bighead.png new file mode 100644 index 000000000..9e73be97f Binary files /dev/null and b/docs/images/bighead.png differ diff --git a/docs/images/cluster_components.png b/docs/images/cluster_components.png new file mode 100644 index 000000000..52af1f68a Binary files /dev/null and b/docs/images/cluster_components.png differ diff --git a/docs/images/cluster_overview.png b/docs/images/cluster_overview.png new file mode 100644 index 000000000..52ce8f864 Binary files /dev/null and b/docs/images/cluster_overview.png differ diff --git a/docs/images/compression_stars.png b/docs/images/compression_stars.png new file mode 100644 index 000000000..7f23e1d12 Binary files /dev/null and b/docs/images/compression_stars.png differ diff --git a/docs/images/richard.png b/docs/images/richard.png new file mode 100644 index 000000000..4c2bbbf13 Binary files /dev/null and b/docs/images/richard.png differ diff --git a/docs/images/srcc.png b/docs/images/srcc.png new file mode 100644 index 000000000..29e6225a7 Binary files /dev/null and b/docs/images/srcc.png differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 000000000..551ebadf5 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,16 @@ + Sherlock documentation - Sherlock

Sherlock documentation#

Sherlock

Welcome to Sherlock!#

Sherlock is a High-Performance Computing (HPC) cluster, operated by the Stanford Research Computing Center to provide computing resources to the Stanford community at large. You'll find all the documentation, tips, FAQs and information about Sherlock among these pages.

Why use Sherlock?#

Using Sherlock for your work provides many advantages over individual solutions: hosted in an on-premises, state-of-the-art datacenter, the Sherlock cluster is powered and cooled by installations that are optimized for scientific computing.

On Sherlock, simulations and workloads benefit from performance levels that only large-scale HPC systems can offer: high-performance I/O infrastructure, petabytes of storage, a large variety of hardware configurations, GPU accelerators, and centralized system administration and management provided by Stanford Research Computing.

Such features are not easily accessible at the departmental level, and often require both significant initial investments and recurring costs. Joining Sherlock allows researchers and faculty members to avoid those costs and benefit from economies of scale, as well as to access larger, professionally managed computing resources than would otherwise be available on an individual or even departmental basis.

How much does it cost?#

Sherlock is free to use for anyone doing departmental or sponsored research at Stanford. Any faculty member can request access for research purposes, and get an account with a base storage allocation and unlimited compute time on the global, shared pool of resources.

No CPU.hour charge

Unlike all Cloud Service Providers and many HPC systems, there is no usage charge on Sherlock.

When you submit your work on Sherlock, you don't need to keep an eye on the clock and worry about how much that run will cost you. There is no limit on the total amount of computing you can run on the cluster, as long as resources are available, and there's no charge to use them, no matter how large or small your computations are.

In case those free resources are not sufficient, Stanford Research Computing offers Faculty members the opportunity to invest in the cluster, and get access to additional computing resources for their research teams. Using a traditional compute cluster condominium model, participating faculty and their teams get priority access to the resources they purchase. When those resources are idle, they are available for use by other owners on the cluster, giving everyone access to virtually unlimited resources.

Information sources#

Searching the docs

If you're looking for information on a specific topic, the Search feature of this site will allow you to quickly find the page you're looking for. Just press S, F or / to open the Search bar and start typing.

To help users take their first steps on Sherlock, we provide documentation and information through various channels:

  • Documentation (www.sherlock.stanford.edu/docs — you are here): information to help new users start on Sherlock, and more in-depth documentation for users already familiar with the environment.
  • Changelog (news.sherlock.stanford.edu): announcements, news and updates about Sherlock.
  • Dashboard (status.sherlock.stanford.edu): status of Sherlock's main components and services, outages, planned maintenance.

To get started, you can take a look at the concepts and glossary pages to get familiar with the terminology used throughout the documentation pages. Then, we recommend going through the following sections:

Acknowledgment / citation#

It is important and expected that publications resulting from computations performed on Sherlock acknowledge this. The following wording is suggested:

Acknowledgment

Some of the computing for this project was performed on the Sherlock cluster. We would like to thank Stanford University and the Stanford Research Computing Center for providing computational resources and support that contributed to these research results.

Support#

Research Computing support can be reached by sending an email to srcc-support@stanford.edu and mentioning Sherlock.

How to submit effective support requests

To ensure a timely and relevant response, please make sure to include some additional details, such as job ids, commands executed and error messages received, so we can help you better. For more details, see the Troubleshooting page.

As a member of the Sherlock community, you're also automatically subscribed to the sherlock-announce mailing-list, which is only used by the Stanford Research Computing team to send important announcements about Sherlock.

Onboarding sessions#

We offer regular onboarding sessions for new Sherlock users.

On-boarding session times

On-boarding sessions are offered every first Wednesday of the month, 1PM-2PM PST, via Zoom

These one-hour sessions are a brief introduction to Sherlock's layout, its scheduler, the different file systems available on the cluster, as well as some job submission and software installation best practices for new users. They are a good intro course if you are new to Sherlock or HPC in general.

If you can't attend live on-boarding sessions, you can still take a look at the on-boarding slides as well as this session recording.

Office hours#

Sending a question to srcc-support@stanford.edu is always the best first option for questions. That way you can include detailed descriptions of the problem or question, valuable output and error messages and any steps you took when you encountered your error. Also, everyone on our team will see your ticket, enabling the most appropriate group member to respond.

Office hours are a good place for more generalized questions about Sherlock, Slurm, Linux usage, data storage, queue structures/scheduling, job optimization and general capabilities of Sherlock. They're also useful for more technically nuanced questions that may not be easily answered with our ticketing system. In office hours, some problems can indeed be solved quickly, or enough progress can be made that you can then work self-sufficiently towards a solution on your own.

COVID-19 update

We'll be holding remote office hours via Zoom, for the time being.

Office hours times

Click here to join the Sherlock Office Hours Zoom

  • Tuesday 10-11am
  • Thursday 3-4pm

You'll need a full-service SUNet ID (basically, a @stanford.edu email address) in order to authenticate and join Office Hours via Zoom. If you do not have a full service account, please contact us at srcc-support@stanford.edu.

If you can't make any of the Office Hours sessions, you can also make an appointment with Sherlock's support team.

What to expect#

  • We cannot accommodate walk-ins: we're unfortunately not staffed to welcome unscheduled visits, so please make sure that you're planning to stop by during office hours. We will not be able to help you otherwise.

  • We can rarely help with application-specific or algorithm problems.

  • You should plan your projects sufficiently in advance and not come to office hours at the last minute before a deadline. Sherlock is a busy resource with several thousand users and you should not expect your jobs to complete before a given date.

  • Not all questions and problems can be answered or solved during office hours, especially ones involving hardware, filesystem or network issues. Sherlock features several thousand computing, networking and storage components, that are constantly being monitored by our team. You can be sure that when Sherlock has an issue, we are aware of it and working on it.

User community#

Sherlock is present on the Stanford Slack Grid, and you're more than welcome to join the following channels:

  • #sherlock-announce, for announcements related to Sherlock and its surrounding services,
  • #sherlock-users, as a place for Sherlock users to connect directly with each other. If you have general questions about Sherlock, want to reach out to other Sherlock users to share tips, good practices, tutorials or other info, please feel free to do so there.

For more details about the SRCC Slack Workspace, and instructions on how to join this workspace and its channels, please see the Stanford Research Computing support page.

Slack is not an official support channel

Please note that while Stanford Research Computing staff will monitor these channels, the official way to get support is still to email us at srcc-support@stanford.edu.

Quick Start#

If you're in a rush1, here's a 3-step ultra-quick start:

  1. connect to Sherlock
$ ssh login.sherlock.stanford.edu
+
  2. get an interactive session on a compute node
[kilian@sh-ln01 login! ~]$ sh_dev
+
  3. run a command
[kilian@sh02-01n58 ~]$ module load python
+[kilian@sh02-01n58 ~]$ python -c "print('Hello Sherlock')"
+Hello Sherlock
+

Congrats! You ran your first job on Sherlock!

Replay#

Here's what it looks like in motion:


  1. even in a rush, you'll still need an account on the cluster. See the Prerequisites page for details. 

\ No newline at end of file diff --git a/docs/orders/index.html b/docs/orders/index.html new file mode 100644 index 000000000..4081066ae --- /dev/null +++ b/docs/orders/index.html @@ -0,0 +1,16 @@ + Ordering nodes - Sherlock

Ordering nodes on Sherlock#

For research groups needing access to additional, dedicated computing resources on Sherlock, we offer the possibility for PIs to purchase their own compute nodes to add to the cluster.

Operating costs for managing and housing PI-purchased compute nodes are waived in exchange for letting other users make use of any idle compute cycles on the PI-owned nodes. Owners have priority access to the computing resources they purchase, but can access more nodes for their research if they need to. This provides the PI with much greater flexibility than owning a standalone cluster.

Conditions#

Service term#

Compute nodes are purchased for a duration of 4 years

Compute nodes are purchased and maintained based on a 4-year lifecycle, which is the duration of the equipment warranty and vendor support.

Owners will be notified during the 4th year that their nodes' lifetime is about to reach its term, at which point they'll be welcome to either:

  • renew their investment by purchasing new nodes,
  • continue to use the public portion of Sherlock's resources.

At the end of their service term, compute nodes are physically retired from the cluster, to make room for new equipment. Compute nodes may be kept running for an additional year at most after the end of their service term, while PIs plan for equipment refresh. Nodes failing during this period may not be repaired, and failed hardware will be disabled or removed from the system.

Please note that outside of exceptional circumstances, nodes purchased in Sherlock cannot be removed from the cluster before the end of their service term.

Shared ownership#

Minimum order of one node per PI

The number of nodes in a shared order must be greater than or equal to the number of purchasing PI groups.

For operational and administrative reasons, as well as usability, we do not support shared ownership of equipment: multiple PI groups cannot purchase and share a single compute node. Shared orders have a minimum of one node per purchasing PI group.

Compute nodes catalog#

Stanford Research Computing offers a select number of compute node configurations that have been tested and validated on Sherlock and that aim to cover most computing needs.

Sherlock catalog

Complete details are available in the Sherlock compute nodes catalog 3

Configurations#

We try to provide hardware configurations that can cover the needs and requirements of a wide range of computing applications, in various scientific fields, and to propose a spectrum of pricing tiers, as shown in the table below:

  • CBASE ($): Base configuration. Best per-core performance for serial applications, multi-threaded (OpenMP) and distributed (MPI) applications. Most flexible and cost-effective configuration.
  • CPERF ($$): High-core count configuration. Multi-threaded applications requiring higher numbers of CPU cores.
  • CBIGMEM ($$$$): Large-memory configuration. Serial or multi-threaded applications requiring terabytes of memory (genome assembly, etc.).
  • G4FP32 ($$): Base GPU configuration. Single-precision (FP32) GPU-accelerated applications (CryoEM, MD...) with low GPU memory requirements.
  • G4FP64 ($$$): HPC GPU configuration. AI, ML/DL and GPU-accelerated HPC codes requiring double-precision (FP64) and larger amounts of GPU memory.
  • G4TF64 / G8TF64 ($$$$): Best-in-class GPU configuration. AI, ML/DL and GPU-accelerated HPC codes requiring double-precision (FP64), large amounts of GPU memory, and heavy multi-GPU scaling.
Choosing the best node configuration for your needs

Although some configurations may appear cheaper when looking at the dollar/core ratio, this is not the only point to consider when determining the best configuration for your workload.

Performance per core

There are other factors to take into account, notably the memory and I/O bandwidth per core, which could be lower on higher core-count configurations like CPERF. With several times more cores than CBASE, those nodes still provide the same total amount of bandwidth to remote and local storage, as well as, to a lesser extent, to memory. Higher core-count CPUs also often run at lower core frequencies, which, combined with less bandwidth per core, may result in lower performance for serial jobs.

CPERF nodes are an excellent fit for multi-threaded applications that don't span multiple nodes. But for more diverse workloads, they don't offer the same level of flexibility as CBASE nodes, which can run a mix of serial, multi-threaded and MPI applications equally well.

Resources availability

Another important factor to take into account is that fewer nodes for a given number of cores offer less resilience against potential hardware failures: if a single 128-core node becomes unavailable for some reason, that's 128 cores that nobody can use while the node is being repaired. But with 128 cores spread over 4x 32-core nodes, if one node fails, there are still 96 cores that can be used.

We'll be happy to help you determine the best configuration for your computing needs; feel free to reach out to schedule a consultation.

Configuration details for the different compute node types are listed in the Sherlock compute nodes catalog 3

Prices#

Prices for the different compute node types are listed in the Sherlock compute nodes catalog 3. They include tax and shipping fees, and are subject to change when quoted: they tend to follow market-wide variations induced by global political and economic events, which are outside of our control. Prices are provided there as a guideline for setting expectations.

There are two components in the cost of a compute node purchase:

  1. the cost of the hardware itself (capital purchase),

  2. a one-time, per-node infrastructure fee1 that will be charged to cover the costs of connecting the nodes to the cluster infrastructure (racks, PDUs, networking switches, cables...)

No recurring fees

There is currently no recurring fee associated with purchasing compute nodes on Sherlock. In particular, there is no CPU-hour charge: purchased nodes are available to their owners 100% of the time, at no additional cost.

Currently, there are no user, administrative or management fees associated with ongoing system administration of the Sherlock environment. However, PIs should anticipate the possibility of modest system administration and support fees being levied within the 4-year lifetime of their compute nodes.

Purchasing process#

Minimum purchase

Please note that the minimum purchase is one physical server per PI group. We cannot accommodate multiple PIs pooling funds for a single node.

Single-node orders may incur additional delays

Some node configurations need to be ordered from the vendor in sets of 4 nodes (see the Sherlock catalog for details). Orders for quantities that are not multiples of 4 will need to be grouped with other PIs' orders, which may incur additional delays.

Purchasing nodes on Sherlock is usually a 5-step process:

  1. the PI uses the order form to submit an order,
  2. Stanford Research Computing requests a formal vendor quote to finalize pricing and communicate it back to the PI for approval,
  3. Stanford Research Computing submits a Stanford PO to the vendor,
  4. Stanford Research Computing takes delivery of the hardware and proceeds with its installation,
  5. Stanford Research Computing notifies the PI that their nodes are ready to be used.

The typical delay between PO submission to the vendor and availability of the compute nodes to PIs is between 4 and 8 weeks.

Supply chain disruption and component shortages

Global supply chain issues and component shortages have considerably increased lead times, and compute node deliveries are currently in the 6-month range.

Required information#

To place an order, we'll need the following information:

  • The SUNet ID of the PI making the purchase request
  • A PTA2 number to charge the hardware (capital) portion of the purchase
  • A PTA2 number to charge the per-node infrastructure fees (non-capital)
    It could be the same PTA used for the capital portion of the purchase, or a different one

Hardware costs can be spread over multiple PTAs (with a maximum of 2 PTAs per order). Please note, however, that the infrastructure fees have to be charged to a single PTA.

Placing an order#

To start ordering compute nodes for Sherlock:

  1. Choose: check the Sherlock catalog 3 to review prices and select your configurations,

  2. Order: fill in the order form 3 to submit your request and provide the required information.

And we'll be in touch shortly!


  1. infrastructure fees are considered non-capital for cost accounting purposes and may incur indirect cost burdens on cost-reimbursable contracts and grants. 

  2. PTA is an acronym used for a Project-Task-Award combination representing an account in the Stanford Financial system. 

  3. SUNet ID required, document restricted to @stanford.edu accounts. 

\ No newline at end of file diff --git a/docs/overview/about/index.html b/docs/overview/about/index.html new file mode 100644 index 000000000..6f1fcf0e4 --- /dev/null +++ b/docs/overview/about/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/concepts/index.html b/docs/overview/concepts/index.html new file mode 100644 index 000000000..d7ee56184 --- /dev/null +++ b/docs/overview/concepts/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/index.html b/docs/overview/index.html new file mode 100644 index 000000000..e0c38c74d --- /dev/null +++ b/docs/overview/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/introduction/index.html b/docs/overview/introduction/index.html new file mode 100644 index 000000000..083e1867d --- /dev/null +++ b/docs/overview/introduction/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/orders/index.html b/docs/overview/orders/index.html new file mode 100644 index 000000000..f9894f70c --- /dev/null +++ b/docs/overview/orders/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/orders/process/index.html b/docs/overview/orders/process/index.html new file mode 100644 index 000000000..4e23a8b7f --- /dev/null +++ b/docs/overview/orders/process/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/tech/facts/index.html b/docs/overview/tech/facts/index.html new file mode 100644 index 000000000..4d32a662b --- /dev/null +++ b/docs/overview/tech/facts/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/tech/glossary/index.html b/docs/overview/tech/glossary/index.html new file mode 100644 index 000000000..82c15389f --- /dev/null +++ b/docs/overview/tech/glossary/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/tech/index.html b/docs/overview/tech/index.html new file mode 100644 index 000000000..9b729243b --- /dev/null +++ b/docs/overview/tech/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/tech/specs/index.html b/docs/overview/tech/specs/index.html new file mode 100644 index 000000000..875a2d208 --- /dev/null +++ b/docs/overview/tech/specs/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/overview/tech/status/index.html b/docs/overview/tech/status/index.html new file mode 100644 index 000000000..a7f144eca --- /dev/null +++ b/docs/overview/tech/status/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/software/containers/apptainer/index.html b/docs/software/containers/apptainer/index.html new file mode 100644 index 000000000..8b0a3f708 --- /dev/null +++ b/docs/software/containers/apptainer/index.html @@ -0,0 +1,107 @@ + Singularity - Sherlock

Singularity#

Singularity is an open source container platform designed to run complex applications on high-performance computing (HPC) clusters in a simple, portable, and reproducible way. It's like Docker, but for HPC systems.

Why not Docker?#

Docker has long been the reference and the most popular container framework in DevOps and Enterprise IT environments, so why not use Docker on Sherlock? Well, for a variety of technical reasons, mostly related to security.

Docker was never designed or developed to run in multi-tenant environments, let alone on HPC clusters. Specifically:

  • Docker requires a daemon running as root on all of the compute nodes, which has serious security implications,
  • all authenticated actions (such as login, push ...) are also executed as root, meaning that multiple users can't use those functions on the same node,
  • Docker uses cgroups to isolate containers, as does the Slurm scheduler, which uses cgroups to allocate resources to jobs and enforce limits. Those two uses unfortunately conflict,
  • but most importantly, allowing users to run Docker containers would give them root privileges inside that container, which would in turn let them access any of the cluster's filesystems as root. This opens the door to user impersonation, inappropriate file tampering or theft, and is obviously not something that can be allowed on a shared resource.

That last point is certainly the single most important reason why we won't use Docker on Sherlock.

Why Singularity?#

Singularity is Docker for HPC systems

Singularity allows running Docker containers natively, and is a perfect replacement for Docker on HPC systems such as Sherlock. That means that existing Docker containers can be directly imported and run natively with Singularity.

Despite Docker's shortcomings on HPC systems, the appeal of containers for scientific computing is undeniable, which is why we provide Singularity on Sherlock. Singularity is an alternative container framework, specifically designed to run scientific applications on HPC clusters.

Singularity provides the same functionality as Docker, without any of the drawbacks listed above. Using a completely different implementation, it doesn't require any privileges to run containers, and allows direct interaction with existing Docker containers.

The main motivation to use Singularity over Docker is that it was developed with HPC systems in mind, to solve these specific problems:

  • security: a user in the container is the same user as the one running the container, so no privilege escalation is possible,
  • ease of deployment: no daemon running as root on each node, a container is simply an executable,
  • no need to mount filesystems or do bind mappings to access devices,
  • ability to run MPI jobs based on containers,
  • and more...

More documentation#

The following documentation is specifically intended for using Singularity on Sherlock. For more complete documentation about building and running containers with Singularity, please see the Singularity documentation.

Singularity on Sherlock#

As announced during the SC'18 Supercomputing Conference, Singularity is an integral part of the Sherlock cluster, and Singularity commands can be executed natively on any login or compute node, without the need to load any additional module1.

Importing containers#

Pre-built containers can be obtained from a variety of sources. For instance:

  • DockerHub contains containers for various software packages, which can be directly used with Singularity,
  • SingularityHub is a registry for scientific Linux containers,
  • the NVIDIA GPU Cloud registry for GPU-optimized containers,
  • many individual projects contain specific instructions for installation via Docker and/or Singularity, and may provide pre-built images in other locations.

To illustrate how Singularity can import and run Docker containers, here's an example of how to install and run the OpenFOAM CFD solver using Singularity. OpenFOAM can be quite difficult to install manually, but Singularity makes it very easy.

Interactive or batch usage

This example shows how to use Singularity interactively, but Singularity containers can be run in batch jobs as well.
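
For reference, here's a minimal sketch of what a batch submission could look like (the image name, job parameters and command are placeholder examples, not prescriptions):

$ cat singularity_test.sbatch
+#!/bin/bash
+#SBATCH --job-name=singularity_test
+#SBATCH --cpus-per-task=4
+#SBATCH --time=00:10:00
+
+# run a command inside the container image (hypothetical image name)
+singularity exec mycontainer.simg echo "Hello from the container"
+
+$ sbatch singularity_test.sbatch
+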

The first step is to request an interactive shell. Singularity images can be pulled directly on compute nodes, and Singularity uses multiple CPU cores when assembling the image, so requesting multiple cores in your job can make the pull operation faster:

$ srun -c 4 --pty bash
+

We recommend storing Singularity images in $GROUP_HOME, as container images can take up significant space in your $HOME directory.

$ mkdir -p $GROUP_HOME/$USER/simg
+$ cd $GROUP_HOME/$USER/simg
+

Then, the OpenFOAM container can be pulled directly from DockerHub by Singularity. This can take a moment to complete:

$ singularity pull docker://openfoam/openfoam6-paraview54
+Docker image path: index.docker.io/openfoam/openfoam6-paraview54:latest
+Cache folder set to /scratch/users/kilian/.singularity/docker
+Importing: base Singularity environment
+Exploding layer: sha256:1be7f2b886e89a58e59c4e685fcc5905a26ddef3201f290b96f1eff7d778e122.tar.gz
+[...]
+Building Singularity image...
+Singularity container built: ./openfoam6-paraview54.simg
+Cleaning up...
+Done. Container is at: ./openfoam6-paraview54.simg
+

Running containers#

Once the image is downloaded, you are ready to run OpenFOAM from the container. The singularity shell command can be used to start the container, and run a shell within that image:

By default on Sherlock, all the filesystems that are available on the compute node will also be available in the container. If you want to start your shell in a specific directory, you can use the --pwd /path/ option. For instance, we'll create a /tmp/openfoam_test/ directory to store our test results (it will be wiped out at the end of the job), and start the container shell there:

$ mkdir /tmp/openfoam_test
+$ singularity shell --pwd /tmp/openfoam_test openfoam6-paraview54.simg
+Singularity: Invoking an interactive shell within container...
+Singularity openfoam6-paraview54.simg:/tmp/openfoam_test>
+

You're now in the container, as denoted by the shell prompt (Singularity[...].simg:[path]>), which is different from the prompt displayed on the compute node (which usually looks like [login]@[compute_node] [path]$).

OpenFOAM provides a convenience script that can be sourced to make OpenFOAM commands directly accessible and set a few useful environment variables:

> source /opt/openfoam6/etc/bashrc
+

Now, we can run a simple example using OpenFOAM:

> cp -r $FOAM_TUTORIALS/incompressible/simpleFoam/pitzDaily .
+> cd pitzDaily
+> blockMesh
+[...]
+End
+
+> simpleFoam
+/*---------------------------------------------------------------------------*\
+  =========                 |
+  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
+   \\    /   O peration     | Website:  https://openfoam.org
+    \\  /    A nd           | Version:  6
+     \\/     M anipulation  |
+\*---------------------------------------------------------------------------*/
+Build  : 6-1a0c91b3baa8
+Exec   : simpleFoam
+Date   : Oct 05 2018
+Time   : 23:37:30
+Host   : "sh01-06n33.int"
+PID    : 14670
+I/O    : uncollated
+Case   : /tmp/openfoam_test/pitzDaily
+nProcs : 1
+sigFpe : Enabling floating point exception trapping (FOAM_SIGFPE).
+fileModificationChecking : Monitoring run-time modified files using timeStampMaster (fileModificationSkew 10)
+allowSystemOperations : Allowing user-supplied system call operations
+
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
+Create time
+[...]
+SIMPLE solution converged in 288 iterations
+
+streamLine streamlines write:
+    seeded 10 particles
+    Tracks:10
+    Total samples:11980
+    Writing data to "/tmp/openfoam_test/pitzDaily/postProcessing/sets/streamlines/288"
+End
+
+>
+

When the simulation is done, you can exit the container with:

> exit
+

Because the container can see all the compute node's filesystems, the simulation output will be available in /tmp/openfoam_test after you exit the container:

$ ls /tmp/openfoam_test/pitzDaily/postProcessing/
+sets
+

GPU-enabled containers#

Sherlock also supports the use of container images provided by NVIDIA in the NVIDIA GPU Cloud (NGC). This registry provides GPU-accelerated containers for the most popular HPC and deep-learning scientific applications.

GPU support

Containers provided on NGC are only supported on Pascal and Volta architectures (TITAN Xp, Tesla P40, P100 or V100). For GPUs from the previous generations (GTX TITAN Black/X, Tesla K20/K80), things may or may not work.

We recommend selecting a supported GPU generation by adding the following directive to your batch script when submitting a job to run GPU-enabled containers from NGC:

#SBATCH -C "GPU_GEN:PSC|GPU_GEN:VLT"
+
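
For instance, a complete batch script for an NGC container job could be sketched as follows (a hedged example: the image name app.simg and its run command are placeholders):

#!/bin/bash
+#SBATCH --partition=gpu
+#SBATCH --cpus-per-task=4
+#SBATCH --gres=gpu:1
+#SBATCH -C "GPU_GEN:PSC|GPU_GEN:VLT"
+
+# --nv enables GPU support within the container (see "Running NGC containers" below)
+singularity run --nv app.simg
+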

Pulling NGC images#

As before, we start by requesting an interactive shell with multiple CPU cores, and by moving to the directory where we'll save those images:

$ srun -c 4 --pty bash
+$ cd $GROUP_HOME/simg
+

A GPU is not required for pulling GPU-enabled containers

GPU-enabled containers can be pulled on any node, including nodes without a GPU. But their execution requires a GPU, and thus they need to be executed within a GPU job. See the GPU job section for more information.

To be able to pull an image from NGC, authentication credentials must be set. Users need to register and create an NGC API key; complete details can be found in the NGC Getting Started Guide.

You can then set the following environment variable to allow Singularity to authenticate with NGC:

$ export SINGULARITY_DOCKER_USERNAME='$oauthtoken'
+$ export SINGULARITY_DOCKER_PASSWORD=<NVIDIA NGC API key>
+

Note

The SINGULARITY_DOCKER_USERNAME environment variable must be set to the literal $oauthtoken string, for every user. It should not be replaced by anything else. Only the API key is specific to each user.

Once credentials are set in the environment, container images can be pulled from the NGC registry normally.

The general form of the Singularity command used to pull NGC containers is: $ singularity pull docker://nvcr.io/<registry>/<app:tag>

For example, to pull the NAMD NGC container tagged with version 2.12-171025, the corresponding command would be:

$ singularity pull docker://nvcr.io/hpc/namd:2.12-171025
+

After this command has finished, we'll have a Singularity image file in the current directory, named namd-2.12-171025.simg.

Running NGC containers#

Instructions about running NGC containers are provided on the NGC website, under each application:

NAMD on NGC

Each application comes with specific running instructions, so we recommend following the container's particular guidelines before running it with Singularity.

Containers that lack Singularity documentation have not been tested with Singularity.

Since all NGC containers are optimized for GPU acceleration, they must always be executed with the --nv Singularity option, to enable GPU support within the container.

We also need to submit a job requesting a GPU to run GPU-enabled containers. For instance:

$ srun -p gpu -c 4 --gres gpu:1 --pty bash
+

This will start an interactive shell on a GPU node, with 4 CPU cores and 1 GPU.

The NAMD container that was pulled just before can now be started with the following commands. We first create a temporary directory to hold the execution results, and then start the container using it as the current directory:

$ mkdir /tmp/namd_test
+$ singularity shell --nv --pwd /tmp/namd_test $GROUP_HOME/simg/namd-2.12-171025.simg
+Singularity: Invoking an interactive shell within container...
+
+Singularity namd-2.12-171025.simg:/tmp/namd_test>
+

From there, we can run a NAMD test to verify that everything is working as expected.

> cp -r /workspace/examples .
+> /opt/namd/namd-multicore +p4 +idlepoll examples/apoa1/apoa1.namd
+Charm++: standalone mode (not using charmrun)
+Charm++> Running in Multicore mode:  4 threads
+Charm++> Using recursive bisection (scheme 3) for topology aware partitions
+Converse/Charm++ Commit ID: v6.8.2
+[...]
+Info: Built with CUDA version 9000
+Did not find +devices i,j,k,... argument, using all
+Pe 1 physical rank 1 will use CUDA device of pe 2
+Pe 3 physical rank 3 will use CUDA device of pe 2
+Pe 0 physical rank 0 will use CUDA device of pe 2
+Pe 2 physical rank 2 binding to CUDA device 0 on sh02-14n13.int: 'TITAN Xp'  Mem: 12196MB  Rev: 6.1
+Info: NAMD 2.12 for Linux-x86_64-multicore-CUDA
+[...]
+Info: SIMULATION PARAMETERS:
+Info: TIMESTEP               1
+[...]
+ENERGY:    2000     20247.5090     20325.4554      5719.0088       183.9328        -340639.3103     25366.3986         0.0000         0.0000     46364.9951        -222432.0107       168.6631   -268797.0057   -222054.5175       168.8733          -1129.9509     -1799.6459    921491.4634     -2007.8380     -2007.4145
+
+WRITING EXTENDED SYSTEM TO OUTPUT FILE AT STEP 2000
+WRITING COORDINATES TO OUTPUT FILE AT STEP 2000
+The last position output (seq=-2) takes 0.001 seconds, 559.844 MB of memory in use
+WRITING VELOCITIES TO OUTPUT FILE AT STEP 2000
+The last velocity output (seq=-2) takes 0.001 seconds, 559.844 MB of memory in use
+====================================================
+
+WallClock: 17.593451  CPUTime: 17.497925  Memory: 559.843750 MB
+[Partition 0][Node 0] End of program
+

The simulation should take a few seconds to run. You can verify that it correctly executed on a GPU in the output above. When it's done, you can exit the container with:

> exit
+

Because the container can see all the compute node's filesystems, the simulation output will be available in /tmp/namd_test after you exit the container:

$ cd /tmp/namd_test/examples/apoa1/
+$ ls apoa1-out*
+apoa1-out.coor  apoa1-out.vel  apoa1-out.xsc
+

Building your own containers#

Building Singularity containers requires root privileges, and as such, cannot be done on Sherlock directly.

If you need to modify existing containers or build your own from scratch, the recommended workflow is to prepare and build your containers on a local Linux machine (a workstation, a laptop, or a virtual machine), transfer the resulting container image to Sherlock, and run it there.
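
As an illustration, here's a minimal sketch of a definition file and the corresponding build and transfer steps, to be run on your local machine (file names, the installed package and the destination path are hypothetical examples):

$ cat test.def
+Bootstrap: docker
+From: ubuntu:18.04
+
+%post
+    # packages installed here are just an example
+    apt-get update && apt-get install -y curl
+
+$ sudo singularity build test.simg test.def
+$ scp test.simg login.sherlock.stanford.edu:~/simg/
+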

For complete details about how to build Singularity containers, please refer to the Singularity documentation.


  1. For more information about using modules on Sherlock, please see the software modules documentation

\ No newline at end of file diff --git a/docs/software/containers/images/ngc_namd.png b/docs/software/containers/images/ngc_namd.png new file mode 100644 index 000000000..716360901 Binary files /dev/null and b/docs/software/containers/images/ngc_namd.png differ diff --git a/docs/software/containers/index.html b/docs/software/containers/index.html new file mode 100644 index 000000000..ed8bc4ba7 --- /dev/null +++ b/docs/software/containers/index.html @@ -0,0 +1 @@ + Index - Sherlock

Index

Introduction#

Containers are a solution to the problem of how to get software to run reliably when moved from one computing environment to another. They also resolve installation problems by packaging all the dependencies of an application within a self-contained image, a.k.a. a container.

What's a container?

Put simply, a container consists of an entire runtime environment: an application, plus all its dependencies, libraries and other binaries, and configuration files needed to run it, bundled into one package. By containerizing the application platform and its dependencies, differences in OS distributions and underlying infrastructure are abstracted away.

Container solutions#

There are several ways to run containers in general, and on Sherlock specifically.

  • Apptainer


    Apptainer (formerly Singularity) is an open source container platform designed to run complex applications on high-performance computing (HPC) clusters in a simple, portable, and reproducible way.

    More information

  • More to come...

\ No newline at end of file diff --git a/docs/software/containers/singularity/index.html b/docs/software/containers/singularity/index.html new file mode 100644 index 000000000..833e1fcf0 --- /dev/null +++ b/docs/software/containers/singularity/index.html @@ -0,0 +1,107 @@ + Singularity - Sherlock

Singularity#

Singularity is an open source container platform designed to run complex applications on high-performance computing (HPC) clusters in a simple, portable, and reproducible way. It's like Docker, but for HPC systems.

Why not Docker?#

Docker has long been the reference and the most popular container framework in DevOps and Enterprise IT environments, so why not use Docker on Sherlock? Well, for a variety of technical reasons, mostly related to security.

Docker was never designed or developed to run in multi-tenant environments, let alone on HPC clusters. Specifically:

  • Docker requires a daemon running as root on all of the compute nodes, which has serious security implications,
  • all authenticated actions (such as login, push ...) are also executed as root, meaning that multiple users can't use those functions on the same node,
  • Docker uses cgroups to isolate containers, as does the Slurm scheduler, which uses cgroups to allocate resources to jobs and enforce limits. Those two uses unfortunately conflict,
  • but most importantly, allowing users to run Docker containers would give them root privileges inside that container, which would in turn let them access any of the cluster's filesystems as root. This opens the door to user impersonation, inappropriate file tampering or theft, and is obviously not something that can be allowed on a shared resource.

That last point is certainly the single most important reason why we won't use Docker on Sherlock.

Why Singularity?#

Singularity is Docker for HPC systems

Singularity allows running Docker containers natively, and is a perfect replacement for Docker on HPC systems such as Sherlock. That means that existing Docker containers can be directly imported and run natively with Singularity.

Despite Docker's shortcomings on HPC systems, the appeal of containers for scientific computing is undeniable, which is why we provide Singularity on Sherlock. Singularity is an alternative container framework, specifically designed to run scientific applications on HPC clusters.

Singularity provides the same functionality as Docker, without any of the drawbacks listed above. Using a completely different implementation, it doesn't require any privileges to run containers, and allows direct interaction with existing Docker containers.

The main motivation to use Singularity over Docker is that it was developed with HPC systems in mind, to solve these specific problems:

  • security: a user in the container is the same user as the one running the container, so no privilege escalation is possible,
  • ease of deployment: no daemon running as root on each node, a container is simply an executable,
  • no need to mount filesystems or do bind mappings to access devices,
  • ability to run MPI jobs based on containers,
  • and more...

More documentation#

The following documentation is specifically intended for using Singularity on Sherlock. For more complete documentation about building and running containers with Singularity, please see the Singularity documentation.

Singularity on Sherlock#

As announced during the SC'18 Supercomputing Conference, Singularity is an integral part of the Sherlock cluster, and Singularity commands can be executed natively on any login or compute node, without the need to load any additional module1.

Importing containers#

Pre-built containers can be obtained from a variety of sources. For instance:

  • DockerHub contains containers for various software packages, which can be directly used with Singularity,
  • SingularityHub is a registry for scientific Linux containers,
  • the NVIDIA GPU Cloud registry for GPU-optimized containers,
  • many individual projects contain specific instructions for installation via Docker and/or Singularity, and may provide pre-built images in other locations.

To illustrate how Singularity can import and run Docker containers, here's an example of how to install and run the OpenFOAM CFD solver using Singularity. OpenFOAM can be quite difficult to install manually, but Singularity makes it very easy.

Interactive or batch usage

This example shows how to use Singularity interactively, but Singularity containers can be run in batch jobs as well.
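
For reference, here's a minimal sketch of what a batch submission could look like (the image name, job parameters and command are placeholder examples, not prescriptions):

$ cat singularity_test.sbatch
+#!/bin/bash
+#SBATCH --job-name=singularity_test
+#SBATCH --cpus-per-task=4
+#SBATCH --time=00:10:00
+
+# run a command inside the container image (hypothetical image name)
+singularity exec mycontainer.simg echo "Hello from the container"
+
+$ sbatch singularity_test.sbatch
+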

The first step is to request an interactive shell. Singularity images can be pulled directly on compute nodes, and Singularity uses multiple CPU cores when assembling the image, so requesting multiple cores in your job can make the pull operation faster:

$ srun -c 4 --pty bash
+

We recommend storing Singularity images in $GROUP_HOME, as container images can take up significant space in your $HOME directory.

$ mkdir -p $GROUP_HOME/$USER/simg
+$ cd $GROUP_HOME/$USER/simg
+

Then, the OpenFOAM container can be pulled directly from DockerHub by Singularity. This can take a moment to complete:

$ singularity pull docker://openfoam/openfoam6-paraview54
+Docker image path: index.docker.io/openfoam/openfoam6-paraview54:latest
+Cache folder set to /scratch/users/kilian/.singularity/docker
+Importing: base Singularity environment
+Exploding layer: sha256:1be7f2b886e89a58e59c4e685fcc5905a26ddef3201f290b96f1eff7d778e122.tar.gz
+[...]
+Building Singularity image...
+Singularity container built: ./openfoam6-paraview54.simg
+Cleaning up...
+Done. Container is at: ./openfoam6-paraview54.simg
+

Running containers#

Once the image is downloaded, you are ready to run OpenFOAM from the container. The singularity shell command can be used to start the container, and run a shell within that image:

By default on Sherlock, all the filesystems that are available on the compute node will also be available in the container. If you want to start your shell in a specific directory, you can use the --pwd /path/ option. For instance, we'll create a /tmp/openfoam_test/ directory to store our test results (it will be wiped out at the end of the job), and start the container shell there:

$ mkdir /tmp/openfoam_test
+$ singularity shell --pwd /tmp/openfoam_test openfoam6-paraview54.simg
+Singularity: Invoking an interactive shell within container...
+Singularity openfoam6-paraview54.simg:/tmp/openfoam_test>
+

You're now in the container, as denoted by the shell prompt (Singularity[...].simg:[path]>), which is different from the prompt displayed on the compute node (which usually looks like [login]@[compute_node] [path]$).

OpenFOAM provides a convenience script that can be sourced to make OpenFOAM commands directly accessible and set a few useful environment variables:

> source /opt/openfoam6/etc/bashrc
+

Now, we can run a simple example using OpenFOAM:

> cp -r $FOAM_TUTORIALS/incompressible/simpleFoam/pitzDaily .
+> cd pitzDaily
+> blockMesh
+[...]
+End
+
+> simpleFoam
+/*---------------------------------------------------------------------------*\
+  =========                 |
+  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
+   \\    /   O peration     | Website:  https://openfoam.org
+    \\  /    A nd           | Version:  6
+     \\/     M anipulation  |
+\*---------------------------------------------------------------------------*/
+Build  : 6-1a0c91b3baa8
+Exec   : simpleFoam
+Date   : Oct 05 2018
+Time   : 23:37:30
+Host   : "sh01-06n33.int"
+PID    : 14670
+I/O    : uncollated
+Case   : /tmp/openfoam_test/pitzDaily
+nProcs : 1
+sigFpe : Enabling floating point exception trapping (FOAM_SIGFPE).
+fileModificationChecking : Monitoring run-time modified files using timeStampMaster (fileModificationSkew 10)
+allowSystemOperations : Allowing user-supplied system call operations
+
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
+Create time
+[...]
+SIMPLE solution converged in 288 iterations
+
+streamLine streamlines write:
+    seeded 10 particles
+    Tracks:10
+    Total samples:11980
+    Writing data to "/tmp/openfoam_test/pitzDaily/postProcessing/sets/streamlines/288"
+End
+
+>
+

When the simulation is done, you can exit the container with:

> exit
+

Because the container can see all the compute node's filesystems, the simulation output will be available in /tmp/openfoam_test after you exit the container:

$ ls /tmp/openfoam_test/pitzDaily/postProcessing/
+sets
+

GPU-enabled containers#

Sherlock also supports the use of container images provided by NVIDIA in the NVIDIA GPU Cloud (NGC). This registry provides GPU-accelerated containers for the most popular HPC and deep-learning scientific applications.

GPU support

Containers provided on NGC are only supported on Pascal and Volta architectures (TITAN Xp, Tesla P40, P100 or V100). For GPUs from the previous generations (GTX TITAN Black/X, Tesla K20/K80), things may or may not work.

We recommend selecting a supported GPU generation by adding the following directive to your batch script when submitting a job to run GPU-enabled containers from NGC:

#SBATCH -C "GPU_GEN:PSC|GPU_GEN:VLT"
+
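
For instance, a complete batch script for an NGC container job could be sketched as follows (a hedged example: the image name app.simg and its run command are placeholders):

#!/bin/bash
+#SBATCH --partition=gpu
+#SBATCH --cpus-per-task=4
+#SBATCH --gres=gpu:1
+#SBATCH -C "GPU_GEN:PSC|GPU_GEN:VLT"
+
+# --nv enables GPU support within the container (see "Running NGC containers" below)
+singularity run --nv app.simg
+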

Pulling NGC images#

As before, we start by requesting an interactive shell with multiple CPU cores, and by moving to the directory where we'll save those images:

$ srun -c 4 --pty bash
+$ cd $GROUP_HOME/simg
+

A GPU is not required for pulling GPU-enabled containers

GPU-enabled containers can be pulled on any node, including nodes without a GPU. But their execution requires a GPU, and thus they need to be executed within a GPU job. See the GPU job section for more information.

To be able to pull an image from NGC, authentication credentials must be set. Users need to register and create an NGC API key; complete details can be found in the NGC Getting Started Guide.

You can then set the following environment variable to allow Singularity to authenticate with NGC:

$ export SINGULARITY_DOCKER_USERNAME='$oauthtoken'
+$ export SINGULARITY_DOCKER_PASSWORD=<NVIDIA NGC API key>
+

Note

The SINGULARITY_DOCKER_USERNAME environment variable must be set to the literal $oauthtoken string, for every user. It should not be replaced by anything else. Only the API key is specific to each user.

Once credentials are set in the environment, container images can be pulled from the NGC registry normally.

The general form of the Singularity command used to pull NGC containers is: $ singularity pull docker://nvcr.io/<registry>/<app:tag>

For example, to pull the NAMD NGC container tagged with version 2.12-171025, the corresponding command would be:

$ singularity pull docker://nvcr.io/hpc/namd:2.12-171025
+

After this command has finished, we'll have a Singularity image file in the current directory, named namd-2.12-171025.simg.

Running NGC containers#

Instructions about running NGC containers are provided on the NGC website, under each application:

NAMD on NGC

Each application comes with specific running instructions, so we recommend following the container's particular guidelines before running it with Singularity.

Containers that lack Singularity documentation have not been tested with Singularity.

Since all NGC containers are optimized for GPU acceleration, they must always be executed with the --nv Singularity option, to enable GPU support within the container.

We also need to submit a job requesting a GPU to run GPU-enabled containers. For instance:

$ srun -p gpu -c 4 --gres gpu:1 --pty bash
+

This will start an interactive shell on a GPU node, with 4 CPU cores and 1 GPU.

The NAMD container that was pulled just before can now be started with the following commands. We first create a temporary directory to hold the execution results, and then start the container using it as the current directory:

$ mkdir /tmp/namd_test
+$ singularity shell --nv --pwd /tmp/namd_test $GROUP_HOME/simg/namd-2.12-171025.simg
+Singularity: Invoking an interactive shell within container...
+
+Singularity namd-2.12-171025.simg:/tmp/namd_test>
+

From there, we can run a NAMD test to verify that everything is working as expected.

> cp -r /workspace/examples .
+> /opt/namd/namd-multicore +p4 +idlepoll examples/apoa1/apoa1.namd
+Charm++: standalone mode (not using charmrun)
+Charm++> Running in Multicore mode:  4 threads
+Charm++> Using recursive bisection (scheme 3) for topology aware partitions
+Converse/Charm++ Commit ID: v6.8.2
+[...]
+Info: Built with CUDA version 9000
+Did not find +devices i,j,k,... argument, using all
+Pe 1 physical rank 1 will use CUDA device of pe 2
+Pe 3 physical rank 3 will use CUDA device of pe 2
+Pe 0 physical rank 0 will use CUDA device of pe 2
+Pe 2 physical rank 2 binding to CUDA device 0 on sh02-14n13.int: 'TITAN Xp'  Mem: 12196MB  Rev: 6.1
+Info: NAMD 2.12 for Linux-x86_64-multicore-CUDA
+[...]
+Info: SIMULATION PARAMETERS:
+Info: TIMESTEP               1
+[...]
+ENERGY:    2000     20247.5090     20325.4554      5719.0088       183.9328        -340639.3103     25366.3986         0.0000         0.0000     46364.9951        -222432.0107       168.6631   -268797.0057   -222054.5175       168.8733          -1129.9509     -1799.6459    921491.4634     -2007.8380     -2007.4145
+
+WRITING EXTENDED SYSTEM TO OUTPUT FILE AT STEP 2000
+WRITING COORDINATES TO OUTPUT FILE AT STEP 2000
+The last position output (seq=-2) takes 0.001 seconds, 559.844 MB of memory in use
+WRITING VELOCITIES TO OUTPUT FILE AT STEP 2000
+The last velocity output (seq=-2) takes 0.001 seconds, 559.844 MB of memory in use
+====================================================
+
+WallClock: 17.593451  CPUTime: 17.497925  Memory: 559.843750 MB
+[Partition 0][Node 0] End of program
+

The simulation should take a few seconds to run. You can verify that it correctly executed on a GPU in the output above. When it's done, you can exit the container with:

> exit
+

Because the container can see all the compute node's filesystems, the simulation output will be available in /tmp/namd_test after you exit the container:

$ cd /tmp/namd_test/examples/apoa1/
+$ ls apoa1-out*
+apoa1-out.coor  apoa1-out.vel  apoa1-out.xsc
+

Building your own containers#

Building Singularity containers requires root privileges, and as such, cannot be done on Sherlock directly.

If you need to modify existing containers or build your own from scratch, the recommended workflow is to prepare and build your containers on a local Linux machine (a workstation, a laptop, or a virtual machine), transfer the resulting container image to Sherlock, and run it there.
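
As an illustration, here's a minimal sketch of a definition file and the corresponding build and transfer steps, to be run on your local machine (file names, the installed package and the destination path are hypothetical examples):

$ cat test.def
+Bootstrap: docker
+From: ubuntu:18.04
+
+%post
+    # packages installed here are just an example
+    apt-get update && apt-get install -y curl
+
+$ sudo singularity build test.simg test.def
+$ scp test.simg login.sherlock.stanford.edu:~/simg/
+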

For complete details about how to build Singularity containers, please refer to the Singularity documentation.


  1. For more information about using modules on Sherlock, please see the software modules documentation

\ No newline at end of file diff --git a/docs/software/index.html b/docs/software/index.html new file mode 100644 index 000000000..7e71a8b76 --- /dev/null +++ b/docs/software/index.html @@ -0,0 +1 @@ + Software on Sherlock - Sherlock

Software on Sherlock#

Available software#

A set of supported software installations is provided for use on Sherlock. This software is made available through a Software Modules system. For the complete list of available software, please refer to the Software List page.

Licensed software can be used on Sherlock, under certain conditions. Feel free to contact us for more details or if you have questions. For more information about purchasing software licenses, you can contact the Stanford Software Licensing office.

Installation requests#

Installation requests

The Stanford Research Computing team installs, for general use, a set of libraries, tools and software applications that are commonly used across many research groups. However, our staff resources are quite limited and don't allow us to build or maintain custom software applications that may be requested by, or be of use to, a small number of users.

We strongly encourage users to build custom and field- or domain-specific software themselves, and install it in their own personal or group shared directories. That way, they can share the software installations with the rest of the users in their group, if necessary.

Users may even maintain and publish their own local module files to dynamically configure a running environment to use their software. They can share those modules with other users to simplify the use of their custom software installations.
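
For instance, here's a minimal sketch of how a group could expose a shared modulefile directory to the module system (the path is a hypothetical example):

$ mkdir -p $GROUP_HOME/modules
+# make the shared modulefile directory visible to the module system
+$ module use $GROUP_HOME/modules
+# custom modules now appear alongside the centrally provided ones
+$ module avail
+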

Installing your own software

For more information about building your own software on Sherlock, please see the Software Installation page

If the software you need is not in the list of available software, and you have trouble installing it on your own, please contact us with as many details about the package as possible, and we will try to help you install it.

If it's a widely used software package that could benefit multiple users across different scientific communities, we will consider installing it globally, as resources permit1.

Contributed software#

PI groups and labs can share their software installations and modules with the whole Sherlock user community, and let everyone benefit from their tuning efforts and software developments.

Contributed software is supported and maintained by each lab, and contact information is usually provided in the contribs module. See the Modules page for more information about using software modules on Sherlock.

If you're interested in sharing your software installations beyond your own group on Sherlock, please let us know, and we'll get in touch.


  1. Software requests, including version upgrades, are fulfilled in the order they are received, and as time permits. We don't have any dedicated team for software installations, and requests are handled along with other duties, typically within two to three weeks of being received. 

\ No newline at end of file diff --git a/docs/software/install/index.html b/docs/software/install/index.html new file mode 100644 index 000000000..19efbc485 --- /dev/null +++ b/docs/software/install/index.html @@ -0,0 +1 @@ + Installation - Sherlock

Installation

Software installation requests

For more information about software installation requests, please see the Software Overview page

If the software package or version you need is not available in the list of provided software, you may compile and install it yourself. The recommended location for user-installed software is the $GROUP_HOME group shared directory, which is snapshotted and replicated off-site, and can easily be shared with members of a research group.
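
For instance, a typical autotools-style installation into $GROUP_HOME could look like the following sketch (package name, version and prefix are hypothetical examples):

$ cd mypackage-1.0
+# install under the group shared directory instead of the default system prefix
+$ ./configure --prefix=$GROUP_HOME/software/mypackage/1.0
+$ make
+$ make install
+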

🚧 Work in progress 🚧

This page is a work in progress and is not complete yet. We are actively working on adding more content and information.

\ No newline at end of file diff --git a/docs/software/list/index.html b/docs/software/list/index.html new file mode 100644 index 000000000..ec69f6ec7 --- /dev/null +++ b/docs/software/list/index.html @@ -0,0 +1 @@ + List - Sherlock

Software list#

The full list of software centrally installed and managed on Sherlock is in the tables below.

Permanent work in progress

Software installations on Sherlock are an ever ongoing process. We're continuously adding new software to the list. If you're looking for something that is not in the list, there may be other options.

Subscribe to updates

Never want to miss a software update again? Stay up-to-date with new software updates by following the Sherlock software update RSS feed.

Categories#

Software modules on Sherlock are organized in categories, by scientific field or functional class. This means that you will first have to load a category module before getting access to individual application modules. The math and devel categories are loaded by default. See the Modules page for further details and examples.
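
For example, loading an application module from the biology category requires loading the category module first (versions may differ from the ones shown in the tables below):

$ module load biology
+$ module load samtools
+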

We currently provide 587 software modules, in 7 categories, covering 94 fields of science:

  • biology clinical science, computational biology, cryo-em, genomics, molecular biology, neurology, pathology, phylogenetics, population genetics, radiology, workflow management

  • chemistry cheminformatics, computational chemistry, crystallography, docking, electrostatics, molecular dynamics, quantum chemistry, tools

  • devel build, compiler, data, data analytics, debug, engine, framework, IDE, language, lib, mpi, networking, parser, profiling, runtime

  • math computational geometry, deep learning, graph computing, lib, linear algebra, machine learning, numerical analysis, numerical library, optimization, scientific computing, statistics, symbolic, technical computing, topic modelling

  • physics astronomy, CFD, climate modeling, geophysics, geoscience, lib, magnetism, materials science, micromagnetics, particle, photonics, quantum information science, quantum mechanics

  • system backup, benchmark, checkpointing, cloud interface, compiler, compression, containers, database, document management, document processing, file management, file transfer, framework, hardware, job management, language, libs, media, performance, resource monitoring, scm, shell, testing, tool, tools

  • viz data, gis, graphs, imaging, molecular visualization, plotting, remote display

Licensed software

Access to software modules marked with a license icon in the tables below is restricted to properly licensed user groups.

Stanford Research Computing is not funded to provide commercial software on Sherlock and researchers are responsible for the costs of purchasing and renewing commercial software licenses. For more information, please feel free to contact us and see the Stanford Software Licensing page for purchasing information.

Additional flags and features

Some of the modules listed below have been built to support specific architectures or parallel execution modes:

  • versions marked with a GPU icon support GPU acceleration
  • versions marked with an MPI icon support MPI parallel execution
  • versions marked as default are the default version for the module

biology#

Field Module name Version(s) URL Description
clinical science simvascular 20180704
Website Simvascular is a blood flow simulation and analysis toolkit. This module provides the svFSI (Fluid Solid Interaction) solver.
computational biology py-biopython 1.70_py27
1.79_py36
1.79_py39
Website Biopython is a set of freely available tools for biological computation written in Python.
computational biology rosetta 3.8 
3.14 
Website Rosetta is the premier software suite for modeling macromolecular structures. As a flexible, multi-purpose application, it includes tools for structure prediction, design, and remodeling of proteins and nucleic acids.
cryo-em ctffind 4.1.13
Website ctffind is a program for finding CTFs of electron micrographs.
cryo-em eman2 2.2  
2.91  
Website EMAN2 is a broadly based greyscale scientific image processing suite with a primary focus on processing data from transmission electron microscopes.
cryo-em imod 4.9.12 
4.11.5 
Website IMOD is a set of image processing, modeling and display programs used for tomographic reconstruction and for 3D reconstruction of EM serial sections and optical sections.
cryo-em motioncor2 1.3.1  
1.5.0 
1.6.4 
Website MotionCor2 is a multi-GPU accelerated program which corrects anisotropic image motion at the single pixel level.
cryo-em py-topaz 0.2.4_py36 
0.2.5_py39 
Website A pipeline for particle detection in cryo-electron microscopy images using convolutional neural networks trained from positive and unlabeled examples.
cryo-em relion 2.0.3  
2.1  
4.0.1  
Website RELION (for REgularised LIkelihood OptimisatioN, pronounce rely-on) is a stand-alone computer program that employs an empirical Bayesian approach to refinement of (multiple) 3D reconstructions or 2D class averages in electron cryo-microscopy (cryo-EM).
genomics angsd 0.919
0.931
Website ANGSD is a software for analyzing next generation sequencing data.
genomics augustus 3.3.2
Website AUGUSTUS is a program that predicts genes in eukaryotic genomic sequences.
genomics bamtools 2.5.1
Website BamTools is a project that provides both a C++ API and a command-line toolkit for reading, writing, and manipulating BAM (genome alignment) files.
genomics bcftools 1.6
1.8
1.16
Website BCFtools is a program for variant calling and manipulating files in the Variant Call Format (VCF) and its binary counterpart BCF.
genomics bcl-convert 4.2.7
Website The BCL Convert App generates demultiplexed FASTQ files from a run as input.
genomics bcl2fastq 2.20
Website The bcl2fastq2 conversion software can be used to convert BCL files from MiniSeq, MiSeq, NextSeq, HiSeq, iSeq and NovaSeq sequencing systems.
genomics bedops 2.4.40
Website BEDOPS is an open-source command-line toolkit that performs highly efficient and scalable Boolean and other set operations, statistical calculations, archiving, conversion and other management of genomic data of arbitrary scale.
genomics bedtools 2.27.1
2.30.0
Website The bedtools utilities are a swiss-army knife of tools for a wide-range of genomics analysis tasks.
genomics bgen 1.1.4
Website bgen is the reference implementation of the BGEN format, a binary file format for imputed genotype and haplotype data.
genomics bowtie 1.2.2
Website Bowtie is an ultrafast, memory-efficient short read aligner.
genomics bowtie2 2.3.4.1
Website Bowtie 2 is an ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences.
genomics breseq 0.38.1
Website breseq is a computational pipeline for finding mutations relative to a reference sequence in short-read DNA resequencing data.
genomics bwa 0.7.17
Website BWA (Burrows-Wheeler Aligner) is a software package for mapping low-divergent sequences against a large reference genome, such as the human genome.
genomics canu 1.8
Website A single molecule sequence assembler for genomes large and small.
genomics cellranger 7.1.0
Website Cell Ranger is a set of analysis pipelines that process Chromium single-cell RNA-seq output to align reads, generate gene-cell matrices and perform clustering and gene expression analysis.
genomics cellranger-atac 2.1.0
Website Cell Ranger ATAC is a set of analysis pipelines that process Chromium Single Cell ATAC data.
genomics cufflinks 2.2.1
Website Cufflinks assembles transcripts, estimates their abundances, and tests for differential expression and regulation in RNA-Seq samples.
genomics dorado 0.3.4
0.5.3
Website Dorado is a high-performance, easy-to-use, open source basecaller for Oxford Nanopore reads.
genomics fastqc 0.11.8
Website FastQC aims to provide a simple way to do some quality control checks on raw sequence data coming from high throughput sequencing pipelines.
genomics fastx_toolkit 0.0.14
Website The FASTX-Toolkit is a collection of command line tools for Short-Reads FASTA/FASTQ files preprocessing.
genomics freebayes 1.2.0
Website FreeBayes is a Bayesian genetic variant detector designed to find small polymorphisms.
genomics gatk 4.1.0.0
4.1.4.1
Website GATK (Genome Analysis Toolkit) offers a wide variety of tools with a primary focus on variant discovery and genotyping.
genomics gemma 0.98.5
Website GEMMA is a software toolkit for fast application of linear mixed models (LMMs) and related models to genome-wide association studies (GWAS) and other large-scale data sets.
genomics hic-pro 2.10.0
Website HiC-Pro: An optimized and flexible pipeline for Hi-C data processing.
genomics hisat2 2.1.0
Website HISAT2 is a fast and sensitive alignment program for mapping next-generation sequencing reads (both DNA and RNA) to a population of human genomes (as well as to a single reference genome).
genomics htslib 1.6
1.8
1.10.2
1.14
1.16
Website C library for high-throughput sequencing data formats.
genomics jellyfish 2.2.10
Website A fast multi-threaded k-mer counter.
genomics kallisto 0.44.0 
0.46.1
0.50.1
Website kallisto is a program for quantifying abundances of transcripts from RNA-Seq data using high-throughput sequencing reads.
genomics metal 20110325
Website The METAL software is designed to facilitate meta-analysis of large datasets (such as several whole genome scans) in a convenient, rapid and memory efficient manner.
genomics mixcr 2.1.12
4.6.0
Website MiXCR is a universal framework that processes big immunome data from raw sequences to quantitated clonotypes.
genomics ncbi-blast+ 2.6.0
2.7.1
2.11.0
Website NCBI BLAST+ is a suite of command-line tools to run BLAST (Basic Local Alignment Search Tool), an algorithm for comparing primary biological sequence information.
genomics ncbi-vdb 3.0.7
Website NCBI VDB is the database engine used by NCBI SRA tools.
genomics plink 1.07
1.90b5.3
2.0a1
2.0a2
Website PLINK is a free, open-source whole genome association analysis toolset, designed to perform a range of basic, large-scale analyses in a computationally efficient manner.
genomics popscle 0.1
Website popscle is a suite of population scale analysis tools for single-cell genomics data.
genomics py-busco 3.0.2_py27
Website Assessing genome assembly and annotation completeness with Benchmarking Universal Single-Copy Orthologs (BUSCO).
genomics py-bx-python 0.8.1_py27
0.8.13_py39
Website Tools for manipulating biological data, particularly multiple sequence alignments.
genomics py-cutadapt 1.18_py27 
1.18_py36
Website Cutadapt finds and removes adapter sequences, primers, poly-A tails and other types of unwanted sequence from your high-throughput sequencing reads.
genomics py-deeplabcut 2.2.3_py39 
Website A software package for animal pose estimation.
genomics py-deeptools 3.3.1_py36
Website Tools to process and analyze deep sequencing data.
genomics py-fithic 1.1.3_py27
Website Fit-Hi-C is a tool for assigning statistical confidence estimates to chromosomal contact maps produced by genome architecture assays.
genomics py-htseq 2.0.1_py39
Website HTSeq is a Python library to facilitate processing and analysis of data from high-throughput sequencing (HTS) experiments.
genomics py-macs2 2.1.1_py27
2.2.9.1_py39
Website MACS (Model-based Analysis of ChIP-Seq) implements a novel ChIP-Seq analysis method.
genomics py-mageck 0.5.9.4_py36
Website Model-based Analysis of Genome-wide CRISPR-Cas9 Knockout (MAGeCK) is a computational tool to identify important genes from the recent genome-scale CRISPR-Cas9 knockout screens technology.
genomics py-mapdamage 2.2.1_py36
Website mapDamage2 is a computational framework which tracks and quantifies DNA damage patterns among ancient DNA sequencing reads generated by Next-Generation Sequencing platforms.
genomics py-multiqc 1.6_py27 
1.6_py36
Website MultiQC is a reporting tool that parses summary statistics from results and log files generated by other bioinformatics tools.
genomics py-obitools 1.2.13_py27
Website OBITools is a set of programs designed for analyzing NGS data in a DNA metabarcoding context.
genomics py-orthofinder 2.5.4_py39
Website OrthoFinder is a fast, accurate and comprehensive platform for comparative genomics.
genomics py-pybedtools 0.8.0_py27
0.8.2_py36
0.9.0_py39
Website Pybedtools wraps and extends BEDTools and offers feature-level manipulations from within Python.
genomics py-pysam 0.14.1_py27
0.15.3_py36
0.18.0_py39
Website Pysam is a python module for reading, manipulating and writing genomic data sets.
genomics py-scanpy 1.8.2_py39
Website Scanpy is a scalable toolkit for analyzing single-cell gene expression data.
genomics py-vcf2gwas 0.8.9_py39
Website Python API for comprehensive GWAS analysis using GEMMA.
genomics py-vispr 0.4.17_py36
Website A visualization framework for CRISPR/Cas9 knockout screens, analyzed with MAGeCK.
genomics regenie 2.2.4
Website regenie is a C++ program for whole genome regression modelling of large genome-wide association studies.
genomics rsem 1.3.3
Website RSEM is a software package for estimating gene and isoform expression levels from RNA-Seq data.
genomics salmon 0.12.0
1.10.0
Website Highly-accurate & wicked fast transcript-level quantification from RNA-seq reads using lightweight alignments.
genomics samtools 1.6
1.8
1.16.1
Website Tools (written in C using htslib) for manipulating next-generation sequencing data.
genomics sentieon 201808.01 
202112.01 
Website Sentieon Genomics software is a set of software tools that perform analysis of genomic data obtained from DNA sequencing.
genomics shapeit 4.0.0 
4.2.2
Website SHAPEIT4 is a fast and accurate method for estimation of haplotypes (aka phasing) for SNP array and high coverage sequencing data.
genomics sra-tools 2.11.0
3.0.7
Website The SRA Toolkit and SDK from NCBI is a collection of tools and libraries for using data in the INSDC Sequence Read Archives.
genomics star 2.5.4b
2.7.10b
Website STAR: ultrafast universal RNA-seq aligner.
genomics stringtie 2.2.1
Website StringTie is a fast and highly efficient assembler of RNA-Seq alignments into potential transcripts.
genomics tophat 2.1.1
Website TopHat is a fast splice junction mapper for RNA-Seq reads.
genomics trim_galore 0.5.0
Website Trim Galore! is a wrapper script to automate quality and adapter trimming as well as quality control, with some added functionality to remove biased methylation positions for RRBS sequence files.
genomics trinity 2.8.4
2.13.1
Website Trinity RNA-Seq de novo transcriptome assembly.
genomics vcflib 1.0.0
Website A C++ library for parsing and manipulating VCF files.
genomics vcftools 0.1.15
Website VCFtools is a program package designed for working with VCF files, such as those generated by the 1000 Genomes Project.
genomics viennarna 2.5.1
Website A C code library and several stand-alone programs for the prediction and comparison of RNA secondary structures.
molecular biology dssp 4.0.3
Website DSSP is an application to assign secondary structure to proteins.
molecular biology libcifpp 3.0.0
Website Library to work with mmCIF and PDB files.
neurology afni 17.2.07
18.2.04
21.3.00
Website AFNI (Analysis of Functional NeuroImages) is a set of C programs for processing, analyzing, and displaying functional MRI (FMRI) data - a technique for mapping human brain activity.
neurology ants 2.1.0
2.3.1
2.4.0
Website ANTs computes high-dimensional mappings to capture the statistics of brain structure and function.
neurology bart 0.7.00 
Website BART is a toolbox for Computational Magnetic Resonance Imaging.
neurology dcm2niix 1.0.20171215
1.0.20211006
Website dcm2niix is a program designed to convert neuroimaging data from the DICOM format to the NIfTI format.
neurology freesurfer 6.0.1
7.1.1
7.2.0
7.3.2
7.4.1
Website An open source software suite for processing and analyzing (human) brain MRI images.
neurology fsl 5.0.10  
6.0.7.10 
Website FSL is a comprehensive library of analysis tools for FMRI, MRI and DTI brain imaging data.
neurology mricron 20160502
Website MRIcron is a cross-platform NIfTI format image viewer.
neurology mrtrix 0.3.16
3.0.3
Website MRtrix3 provides a set of tools to perform various types of diffusion MRI analyses, from various forms of tractography through to next-generation group-level analyses.
neurology py-mdt 0.10.9_py36 
Website The Maastricht Diffusion Toolbox, MDT, is a framework and library for parallelized (GPU and multi-core CPU) diffusion Magnetic Resonance Imaging (MRI) modeling.
neurology py-nipype 1.1.3_py27
1.1.3_py36
Website Nipype is a Python project that provides a uniform interface to existing neuroimaging software and facilitates interaction between these packages within a single workflow.
neurology spm 12
Website The SPM software package has been designed for the analysis of brain imaging data sequences. The sequences can be a series of images from different cohorts, or time-series from the same subject.
neurology workbench 1.3.1
Website Connectome Workbench is an open source, freely available visualization and discovery tool used to map neuroimaging data, especially data generated by the Human Connectome Project.
pathology openslide 3.4.1
Website OpenSlide is a C library that provides a simple interface to read whole-slide images (also known as virtual slides).
pathology py-openslide-python 1.1.1_py27 
1.1.1_py36
Website OpenSlide Python is a Python interface to the OpenSlide library.
phylogenetics py-ete 3.0.0_py27
Website A Python framework for the analysis and visualization of trees.
population genetics py-admixfrog 0.6.1_py36
Website Admixfrog is an HMM to infer ancestry "frogments" (fragments) from low-coverage, contaminated data.
radiology nbia-data-retriever 4.2
Website The NBIA Data Retriever is an application to download radiology images from the TCIA Radiology Portal.
workflow management nextflow 23.04.3
Website Nextflow is a bioinformatics workflow manager that enables the development of portable and reproducible workflows.

chemistry

Field Module name Version(s) URL Description
cheminformatics py-rdkit 2018.09.1_py27 
2018.09.1_py36
2022.09.1_py39
Website RDKit is a collection of cheminformatics and machine-learning software written in C++ and Python.
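As an illustration, a minimal RDKit sketch (assuming one of the py-rdkit modules above is loaded) parses a SMILES string and computes a descriptor:
```python
# Minimal RDKit sketch: parse a SMILES string and compute the molecular weight.
from rdkit import Chem
from rdkit.Chem import Descriptors

mol = Chem.MolFromSmiles("CCO")   # ethanol
print(Descriptors.MolWt(mol))     # ~46.07 g/mol
```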
computational chemistry gaussian g16.A03  
g16.B01  
Website Gaussian is a general purpose computational chemistry software package.
computational chemistry libint 1.1.4
2.0.3
2.6.0
Website Libint computes molecular integrals.
computational chemistry libxc 3.0.0
5.2.2
Website Libxc is a library of exchange-correlation functionals for density-functional theory.
computational chemistry nwchem 6.8  
7.0.2  
Website NWChem is an ab initio computational chemistry software package which also includes quantum chemical and molecular dynamics functionality.
computational chemistry py-ase 3.14.1_py27
3.22.1_py39
Website The Atomic Simulation Environment (ASE) is a set of tools and Python modules for setting up, manipulating, running, visualizing and analyzing atomistic simulations.
computational chemistry schrodinger 2021-1    
2017-3   
2018-1   
2018-2   
2019-2   
2020-2   
2022-3   
2024-1   
Website Schrödinger Suites (Small-molecule Drug Discovery Suite, Material Science Suite, Biologics Suite) provide a set of molecular modelling software.
computational chemistry vasp 5.4.1    
6.1.1   
6.3.2   
6.4.1   
Website The Vienna Ab initio Simulation Package (VASP) is a computer program for atomic scale materials modelling, e.g. electronic structure calculations and quantum-mechanical molecular dynamics, from first principles.
crystallography clipper 2.1.20180802
Website Crystallographic automation and complex data manipulation libraries.
crystallography mmdb2 2.0.20
Website A C++ toolkit for working with macromolecular coordinate files.
crystallography ssm 1.4
Website A macromolecular superposition library.
crystallography vesta 3.4.4
Website VESTA is a 3D visualization program for structural models, volumetric data such as electron/nuclear densities, and crystal morphologies.
docking gnina 1.0.2 
Website A deep learning framework for molecular docking.
electrostatics apbs 1.5
Website APBS solves the equations of continuum electrostatics for large biomolecular assemblages.
molecular dynamics gromacs 2016.3  
2018  
2021.3  
2023.1  
Website GROMACS is a versatile package to perform molecular dynamics, i.e. simulate the Newtonian equations of motion for systems with hundreds to millions of particles.
molecular dynamics lammps 20180316 
20200303  
20230802  
Website LAMMPS is a classical molecular dynamics code that models an ensemble of particles in a liquid, solid, or gaseous state.
molecular dynamics openmm 7.1.1 
Website A high performance toolkit for molecular simulation.
molecular dynamics plumed 2.3.2 
Website PLUMED is an open source library for free energy calculations in molecular systems.
molecular dynamics py-raspa2 2.0.3_py27
Website RASPA2 is a general purpose classical simulation package that can be used for the simulation of molecules in gases, fluids, zeolites, aluminosilicates, metal-organic frameworks, carbon nanotubes and external fields.
molecular dynamics qbox 1.65.0 
Website Qbox is a First-Principles Molecular Dynamics code.
molecular dynamics quip 20170901 
20220426 
Website The QUIP package is a collection of software tools to carry out molecular dynamics simulations.
quantum chemistry cp2k 4.1   
9.1  
Website CP2K is a quantum chemistry and solid state physics software package that can perform atomistic simulations of solid state, liquid, molecular, periodic, material, crystal, and biological systems.
quantum chemistry ocean 2.9.7 
Website OCEAN is a versatile and user-friendly package for calculating core edge spectroscopy including excitonic effects.
quantum chemistry orca 4.2.1 
5.0.0 
5.0.3 
Website ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum chemistry.
quantum chemistry quantum-espresso 6.2.1 
6.6 
7.0 
7.1 
Website Quantum ESPRESSO is an integrated suite of Open-Source computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials.
quantum chemistry quantum-espresso_gpu 1.1  
7.0  
7.1  
Website Quantum ESPRESSO is an integrated suite of Open-Source computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials.
quantum chemistry terachem 1.95A   
1.96H-beta  
Website TeraChem is general purpose quantum chemistry software designed to run on NVIDIA GPU architectures.
tools openbabel 3.1.1
Website Open Babel is a chemical toolbox designed to speak the many languages of chemical data.
tools py-openbabel 3.1.1.1_py39
Website Python bindings for Open Babel.

devel

Field Module name Version(s) URL Description
build bazel 0.16.1
0.26.1
0.29.1
Website Bazel is a fast, scalable, multi-language and extensible build system.
build bazelisk 1.3.0
1.8.0
Website Bazelisk is a wrapper for Bazel written in Go.
build binutils 2.38
Website The GNU Binutils are a collection of binary tools.
build cmake 3.8.1
3.11.1
3.13.1
3.20.3
3.24.2
Website CMake is an extensible, open-source system that manages the build process in an operating system and in a compiler-independent manner.
build kerl 1.8.5
Website Kerl is a tool to easily build and install Erlang/OTP instances.
build make 4.4
Website GNU Make is a tool which controls the generation of executables and other non-source files of a program from the program's source files.
build ninja 1.9.0
Website Ninja is a small build system with a focus on speed.
build py-meson 0.51.1_py36
Website Meson is an open source build system meant to be both extremely fast, and, even more importantly, as user friendly as possible.
build py-scons 3.0.5_py27
3.0.5_py36
4.7.0_py312
Website SCons is an Open Source software construction tool.
compiler aocc 2.1.0
2.2.0
Website AMD Optimizing C/C++ Compiler - AOCC is a highly optimized C, C++ and Fortran compiler for x86 targets especially for Zen based AMD processors.
compiler gcc 6.3.0 
7.1.0
7.3.0
8.1.0
9.1.0
10.1.0
10.3.0
12.1.0
Website The GNU Compiler Collection includes front ends for C, C++, Fortran, Java, and Go, as well as libraries for these languages (libstdc++, libgcj,...).
compiler icc 2017.u2
2018.u1
2018
2019
Website Intel C++ Compiler, also known as icc or icl, is a group of C and C++ compilers from Intel.
compiler ifort 2017.u2
2018.u1
2018
2019
Website Intel Fortran Compiler, also known as ifort, is a group of Fortran compilers from Intel.
compiler llvm 7.0.0 
3.8.1
4.0.0
5.0.0
9.0.1
15.0.3
Website The LLVM Project is a collection of modular and reusable compiler and toolchain technologies. Clang is an LLVM-native C/C++/Objective-C compiler.
compiler nvhpc 21.5 
21.7  
22.3  
23.3  
Website NVIDIA HPC Software Development Kit (SDK) including C, C++, and Fortran compilers.
compiler pgi 19.10
Website PGI compilers and tools, including Open MPI (Community Edition).
compiler smlnj 110.81
Website Standard ML of New Jersey (abbreviated SML/NJ) is a compiler for the Standard ML '97 programming language.
data h5utils 1.12.1
Website h5utils is a set of utilities for visualization and conversion of scientific data in the free, portable HDF5 format.
data hdf5 1.10.6  
1.10.0p1
1.10.2 
1.12.0
1.12.2 
Website HDF5 is a data model, library, and file format for storing and managing data. It supports an unlimited variety of datatypes, and is designed for flexible and efficient I/O and for high volume and complex data.
data hiredis 0.13.3
Website Hiredis is a minimalistic C client library for the Redis database.
data ncl 6.4.0
6.6.2
Website NCL is a free interpreted language designed specifically for scientific data processing and visualization.
data nco 4.8.0 
5.0.6
Website The NCO toolkit manipulates and analyzes data stored in netCDF-accessible formats.
data netcdf 4.4.1.1
4.8.1
Website NetCDF is a set of software libraries and self-describing, machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data.
data netcdf-c 4.9.0 
Website NetCDF is a set of software libraries and self-describing, machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. This module provides C libraries.
data netcdf-cxx 4.3.1 
Website NetCDF is a set of software libraries and self-describing, machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. This module provides C++ libraries.
data netcdf-fortran 4.5.4 
Website NetCDF is a set of software libraries and self-describing, machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. This module provides Fortran libraries.
data pnetcdf 1.8.1 
1.12.3 
Website Parallel netCDF (PnetCDF) is a parallel I/O library for accessing NetCDF files in CDF-1, 2, and 5 formats.
data protobuf 3.4.0 
3.20.0
21.9
Website Protocol Buffers (a.k.a., protobuf) are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data.
data py-pandas 0.23.0_py27
0.23.0_py36
1.0.3_py36
1.3.1_py39
2.0.1_py39
2.2.1_py312
Website pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language.
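A minimal pandas sketch, assuming one of the py-pandas modules above is loaded:
```python
# Minimal pandas sketch: group a small DataFrame and compute per-group means.
import pandas as pd

df = pd.DataFrame({"sample": ["a", "a", "b"], "depth": [10, 14, 7]})
print(df.groupby("sample")["depth"].mean())
```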
data py-protobuf 3.4.0_py27 
3.4.0_py36
3.6.1_py27
3.6.1_py36
3.15.8_py36
3.20.1_py39
4.21.9_py39
Website Python bindings for Google's Protocol Buffers data interchange format.
data redis 4.0.1
Website Redis is an open source, in-memory data structure store, used as a database, cache and message broker.
data zfp 1.0.0
Website zfp is an open-source library for compressed floating-point and integer arrays that support high throughput read and write random access.
data analytics hadoop 3.1.0 
3.3.1
Website The Apache Hadoop software library is a framework that allows for the distributed processing of large data sets across clusters of computers using simple programming models.
data analytics py-sparkhpc 0.3_py27
Website Launching and controlling Spark on HPC clusters.
data analytics spark 2.3.0 
3.2.1
Website Apache Spark™ is a unified analytics engine for large-scale data processing.
debug gdb 8.2.1
Website GDB is the GNU Project debugger.
debug valgrind 3.14.0
Website Valgrind is an instrumentation framework for building dynamic analysis tools.
engine v8 8.4.371.22
Website V8 is Google’s open source high-performance JavaScript and WebAssembly engine, written in C++.
framework dotnet 2.1.500
6.0.413
Website .NET is a free, cross-platform, open source developer platform for building many different types of applications.
framework ga 5.8.2
Website Global Arrays (GA) is a Partitioned Global Address Space (PGAS) programming model.
framework py-kedro 0.18.0_py39
Website Kedro is an open-source Python framework for creating reproducible, maintainable and modular data science code.
IDE code-server 4.16.1
Website Run VS Code on any machine anywhere and access it in the browser.
IDE py-jupytext 1.16.1_py39
Website Jupyter Notebooks as Markdown Documents, Julia, Python or R scripts.
language cuda 9.0.176  
8.0.61 
9.1.85 
9.2.88 
9.2.148 
10.0.130 
10.1.105 
10.1.168 
10.2.89 
11.0.3 
11.1.1 
11.2.0 
11.3.1 
11.4.1 
11.5.0 
11.7.1 
12.0.0 
12.1.1 
12.2.0 
12.4.0 
Website CUDA is a parallel computing platform and application programming interface (API) model created by Nvidia. It allows software developers and software engineers to use a CUDA-enabled graphics processing unit (GPU) for general purpose processing.
language erlang 21.3
Website Erlang is a programming language used to build massively scalable soft real-time systems with requirements on high availability.
language gcl 2.6.14
Website GCL is the official Common Lisp for the GNU project.
language go 1.9
1.14
1.18.2
Website Go is an open source programming language that makes it easy to build simple, reliable, and efficient software.
language guile 2.0.11
2.2.2
Website GNU Guile is the preferred extension system for the GNU Project, which features an implementation of the Scheme programming language.
language haskell 8.6.5
Website Haskell is a statically typed, purely functional programming language with type inference and lazy evaluation.
language java 1.8.0_131 
11.0.11
12.0.2
17.0.4
18.0.2
Website Java is a general-purpose computer programming language that is concurrent, class-based, object-oriented, and specifically designed to have as few implementation dependencies as possible.
language julia 1.3.1
1.4.0
1.5.1
1.6.2
1.7.2
1.8.4
1.9.0
1.10.0
Website Julia is a high-level, high-performance dynamic programming language for numerical computing.
language lua 5.3.4
Website Lua is a powerful, efficient, lightweight, embeddable scripting language. It supports procedural programming, object-oriented programming, functional programming, data-driven programming, and data description.
language luarocks 2.4.3
Website LuaRocks is the package manager for Lua modules.
language manticore 20180301
Website Manticore is a high-level parallel programming language aimed at general-purpose applications running on multi-core processors.
language nodejs 8.9.4
9.5.0
16.13.0
18.15.0
Website Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine. It provides the npm package manager.
language perl 5.26.0
5.36.1
Website Perl 5 is a highly capable, feature-rich programming language with over 29 years of development.
language php 7.3.0
Website PHP (recursive acronym for PHP: Hypertext Preprocessor) is an open source general-purpose scripting language that is especially suited for web development.
language py-cython 0.27.3_py27
0.27.3_py36
0.29.21_py36
0.29.28_py39
Website Cython is an optimising static compiler for both the Python programming language and the extended Cython programming language (based on Pyrex).
language py-ipython 5.4.1_py27 
6.1.0_py36
8.3.0_py39
8.22.2_py312
Website IPython is a command shell for interactive computing in multiple programming languages, originally developed for the Python programming language.
language py-jupyter 1.0.0_py27 
1.0.0_py36
1.0.0_py39
Website Jupyter is a browser-based interactive notebook for programming, mathematics, and data science. It supports a number of languages via plugins.
language py-jupyterlab 2.3.2_py36
4.0.8_py39
Website Jupyter is a browser-based interactive notebook for programming, mathematics, and data science. It supports a number of languages via plugins.
language python 2.7.13 
3.6.1
3.9.0
3.12.1
Website Python is an interpreted, interactive, object-oriented programming language.
language ruby 2.4.1
2.7.1
3.1.2
Website A dynamic, open source programming language with a focus on simplicity and productivity. It has an elegant syntax that is natural to read and easy to write.
language rust 1.35.0
1.56.1
1.63.0
1.72.0
Website A language empowering everyone to build reliable and efficient software.
language scala 2.12.6
Website Scala combines object-oriented and functional programming in one concise, high-level language.
lib ant 1.10.1
Website Apache Ant is a Java library and command-line tool whose mission is to drive processes described in build files as targets and extension points dependent upon each other.
lib boost 1.64.0
1.69.0 
1.75.0 
1.76.0 
1.79.0 
Website Boost is a set of libraries for the C++ programming language that provide support for tasks and structures such as linear algebra, pseudorandom number generation, multithreading, image processing, regular expressions, and unit testing.
lib chai 2.2.2  
Website Copy-hiding array abstraction to automatically migrate data between memory spaces.
lib cnmem 1.0.0 
Website CNMeM is a simple library to help the Deep Learning frameworks manage CUDA memory.
lib conduit 0.5.1  
Website Simplified Data Exchange for HPC Simulations.
lib cub 1.7.3 
1.10.0 
Website CUB is a flexible library of cooperative threadblock primitives and other utilities for CUDA kernel programming.
lib cutlass 0.1.0
3.1.0 
Website CUTLASS is a collection of CUDA C++ template abstractions for implementing high-performance matrix-multiplication (GEMM) at all levels and scales within CUDA.
lib dtcmp 1.1.3
Website Datatype Compare (DTCMP) Library for sorting and ranking distributed data using MPI.
lib eigen 3.3.3
3.4.0
Website Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms.
lib libcircle 0.3.0 
Website libcircle is an API for distributing embarrassingly parallel workloads using self-stabilization.
lib libctl 3.2.2
4.0.1
4.5.0
Website libctl is a library for supporting flexible control files in scientific simulations.
lib libevent 2.1.12
Website The libevent API provides a mechanism to execute a callback function when a specific event occurs on a file descriptor or after a timeout has been reached.
lib libgpuarray 0.7.5 
Website Library to manipulate tensors on the GPU.
lib libtree 2.0.0
Website libtree prints shared object dependencies as a tree.
lib lwgrp 1.0.4 
Website The Light-weight Group Library provides methods for MPI codes to quickly create and destroy process groups.
lib nccl 1.3.4 
2.0.4 
2.1.15 
2.2.13 
2.3.7 
2.4.8 
2.5.6 
2.8.4 
2.11.4 
2.17.1 
2.20.5 
Website NCCL (pronounced 'Nickel') is a stand-alone library of standard collective communication routines, such as all-gather, reduce, broadcast, etc., that have been optimized to achieve high bandwidth over PCIe.
lib pugixml 1.12.1
Website Light-weight, simple and fast XML parser for C++ with XPath support.
lib py-cutlass 3.1.0_py39 
Website Python interface for CUTLASS.
lib py-h5py 2.7.1_py27 
2.8.0_py36
2.10.0_py36
3.1.0_py36
3.7.0_py39
3.10.0_py312
Website The h5py package is a Pythonic interface to the HDF5 binary data format.
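A minimal h5py sketch, assuming one of the py-h5py modules above is loaded; the file name is a placeholder:
```python
# Minimal h5py sketch: write an array to an HDF5 file, then read part of it back.
import h5py
import numpy as np

with h5py.File("demo.h5", "w") as f:          # "demo.h5" is a placeholder name
    f.create_dataset("x", data=np.arange(10))

with h5py.File("demo.h5", "r") as f:
    print(f["x"][:5])                         # [0 1 2 3 4]
```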
lib py-netcdf4 1.3.1_py27 
1.3.1_py36
Website netcdf4-python is a Python interface to the netCDF C library.
lib py-nose 1.3.7_py39
Website nose is nicer testing for python.
lib py-numba 0.35.0_py27 
0.35.0_py36
0.53.1_py36
0.54.1_py39
Website Numba is a compiler for Python array and numerical functions that gives you the power to speed up your applications with high performance functions written directly in Python.
lib py-parsl 1.2.0_py39
Website Parsl is a flexible and scalable parallel programming library for Python.
lib py-pycuda 2017.1.1_py27 
2021.1_py36 
Website PyCUDA lets you access Nvidia‘s CUDA parallel computation API from Python.
lib py-rmm 23.04.00_py39 
Website Python interface for RMM.
lib py-schwimmbad 0.3.1_py36 
0.3.2_py39 
Website schwimmbad provides a uniform interface to parallel processing pools and enables switching easily between local development (e.g., serial processing or with multiprocessing) and deployment on a cluster or supercomputer (via, e.g., MPI or JobLib).
lib py-scikit-image 0.13.0_py27
0.14.0_py27
0.15.0_py27
0.15.0_py36
0.17.2_py36
0.19.3_py39
0.20.0_py39
Website scikit-image is a collection of algorithms for image processing.
lib rabbitmq 3.7.13
Website RabbitMQ is an open-source message broker.
lib raja 0.12.1  
Website Collection of C++ software abstractions that enable architecture portability for HPC applications.
lib rmm 23.04.00 
Website RAPIDS Memory Manager library.
lib swig 3.0.12
Website SWIG is an interface compiler that connects programs written in C and C++ with scripting languages such as Perl, Python, Ruby, and Tcl.
lib tbb 2017.u2
2018.u1
2018
2019
Website Intel® Threading Building Blocks (Intel® TBB) is a widely used C++ library for shared-memory parallel programming and heterogeneous computing (intra-node distributed memory programming).
lib trilinos 12.12.1 
Website Trilinos is a collection of open-source software libraries, called packages, intended to be used as building blocks for the development of scientific applications.
lib xsimd 7.6.0
8.1.0
Website C++ wrappers for SIMD intrinsics and parallelized, optimized mathematical functions (SSE, AVX, NEON, AVX512).
lib zeromq 4.2.2
Website ZeroMQ (also spelled ØMQ, 0MQ or ZMQ) is a high-performance asynchronous messaging library, aimed at use in distributed or concurrent applications.
mpi hpcx 2.6.0  
2.7.0  
2.8.1  
Website Mellanox HPC-X toolkit is a comprehensive software package that includes MPI and SHMEM/PGAS communications libraries.
mpi impi 2017.u2 
2018.u1 
2018 
2019 
Website Intel® MPI Library is a multi-fabric message passing library that implements the Message Passing Interface, version 3.1 (MPI-3.1) specification.
mpi openmpi 4.1.2   
2.0.2 
2.1.1 
3.1.2  
4.0.3  
4.0.5  
4.1.0  
4.1.6  
Website The Open MPI Project is an open source Message Passing Interface implementation that is developed and maintained by a consortium of academic, research, and industry partners.
mpi py-mpi4py 3.0.0_py27 
3.0.3_py36 
3.1.3_py39 
3.1.5_py312 
Website MPI for Python provides Python bindings for the Message Passing Interface (MPI) standard. It is implemented on top of the MPI-1/2/3 specification and exposes an API which grounds on the standard MPI-2 C++ bindings.
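A minimal mpi4py sketch, assuming a py-mpi4py module and a matching MPI module are loaded; launch it with the site's usual MPI launcher (e.g. `srun` or `mpirun`):
```python
# Minimal mpi4py sketch: each rank reports its rank and the communicator size.
# Run with e.g.: mpirun -n 4 python hello_mpi.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
print(f"rank {comm.Get_rank()} of {comm.Get_size()}")
```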
networking gasnet 1.30.0 
Website GASNet is a language-independent, low-level networking layer that provides network-independent, high-performance communication primitives tailored for implementing parallel global address space SPMD languages and libraries.
networking libfabric 1.6.0
1.6.2
1.7.1
1.9.1
1.10.1
1.11.1
1.14.0
Website The Open Fabrics Interfaces (OFI) is a framework focused on exporting fabric communication services to applications. Libfabric is the library that defines and exports the user-space API of OFI.
networking py-ucx-py 0.24.0_py39
Website Python bindings for UCX.
networking ucx 1.3.1
1.8.1 
1.9.0 
1.10.0 
1.12.1 
1.15.0 
Website UCX is a communication library implementing high-performance messaging for MPI/PGAS frameworks.
parser antlr 2.7.7
Website ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files.
parser xerces-c 3.2.1
Website Xerces-C++ is a validating XML parser written in a portable subset of C++.
profiling amd-uprof 3.3.462
Website AMD uProf is a performance analysis tool for applications.
profiling darshan 3.4.4
Website Darshan is a scalable HPC I/O characterization tool.
runtime starpu 1.3.2 
Website StarPU is a unified runtime system that offers support for heterogeneous multicore architectures.

math

Field Module name Version(s) URL Description
computational geometry cgal 4.10
Website The Computational Geometry Algorithms Library (CGAL) is a C++ library that aims to provide easy access to efficient and reliable algorithms in computational geometry.
computational geometry dealii 9.4.1
Website deal.II is a C++ program library targeted at the computational solution of partial differential equations using adaptive finite elements.
computational geometry gmsh 4.10.1
Website Gmsh is an open source 3D finite element mesh generator with a built-in CAD engine and post-processor.
computational geometry opencascade 7.6.2
Website Open CASCADE Technology (OCCT) is an open-source full-scale 3D geometry library.
computational geometry polymake 4.10
Website polymake is open source software for research in polyhedral geometry.
computational geometry qhull 2015.2
Website Qhull computes the convex hull, Delaunay triangulation, Voronoi diagram, halfspace intersection about a point, furthest-site Delaunay triangulation, and furthest-site Voronoi diagram.
computational geometry silo 4.11
Website A mesh and field I/O library and scientific database.
deep learning cudnn 6.0 
7.0.1 
7.0.4 
7.0.5 
7.1.4 
7.4.1.5 
7.6.4 
7.6.5 
8.1.1.33 
8.3.3.40 
8.6.0.163 
8.9.0.131 
9.0.0.312 
Website NVIDIA cuDNN is a GPU-accelerated library of primitives for deep neural networks.
deep learning cutensor 1.2.0 
1.5.0.3 
Website GPU-accelerated tensor linear algebra library.
deep learning py-gym 0.21.0_py39
Website Gym is a toolkit for developing and comparing reinforcement learning algorithms.
deep learning py-horovod 0.12.1_py27   
0.12.1_py36  
Website Horovod is a distributed training framework for TensorFlow. The goal of Horovod is to make distributed Deep Learning fast and easy to use.
deep learning py-keras 2.1.5_py27  
2.0.8_py27 
2.1.5_py36 
2.2.4_py27 
2.2.4_py36 
2.3.1_py36 
Website Keras is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano.
deep learning py-onnx 1.0.1_py27
1.8.1_py36
1.12.0_py39
Website ONNX is an open format to represent deep learning models.
deep learning py-pytorch 0.3.0_py27  
0.2.0_py27 
0.2.0_py36 
0.3.0_py36 
1.0.0_py27 
1.0.0_py36 
1.4.0_py36 
1.6.0_py36 
1.8.1_py39 
1.11.0_py39  
2.0.0_py39  
2.2.1_py312  
Website PyTorch is a deep learning framework that puts Python first.
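A minimal PyTorch sketch, assuming one of the py-pytorch modules above is loaded:
```python
# Minimal PyTorch sketch: one SGD step on a tiny linear regression model.
import torch

model = torch.nn.Linear(3, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

x, y = torch.randn(8, 3), torch.randn(8, 1)       # random toy data
loss = torch.nn.functional.mse_loss(model(x), y)

opt.zero_grad()
loss.backward()
opt.step()
print(loss.item())
```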
deep learning py-tensorboardx 1.8_py27 
Website TensorboardX is TensorBoard™ for PyTorch (and Chainer, MXNet, NumPy...)
deep learning py-tensorflow 2.1.0_py36  
1.4.0_py27 
1.5.0_py27 
1.5.0_py36 
1.9.0_py27 
1.9.0_py36 
2.4.1_py36 
2.6.2_py36 
2.9.1_py39 
2.10.0_py39 
Website TensorFlow™ is an open source software library for numerical computation using data flow graphs.
deep learning py-tensorlayer 1.6.3_py27 
Website TensorLayer is a Deep Learning (DL) and Reinforcement Learning (RL) library extended from Google TensorFlow.
deep learning py-tensorrt 8.5.1.7_py39 
10.0.1_py312 
Website Python bindings for the TensorRT library.
deep learning py-theano 1.0.1_py27 
Website Theano is a Python library that allows you to define, optimize, and evaluate mathematical expressions involving multi-dimensional arrays efficiently.
deep learning py-torchvision 0.15.1_py39
0.17.1_py312
Website Datasets, model architectures, and common image transformations for computer vision for PyTorch.
deep learning py-triton 1.0.0_py39 
Website Triton is a language and compiler for writing highly efficient custom Deep-Learning primitives.
deep learning tensorrt 3.0.1 
3.0.4 
4.0.1.6 
5.0.2.6 
6.0.1.8 
7.0.0.11 
7.2.3.4 
8.5.1.7 
10.0.1.6 
Website NVIDIA TensorRT™ is a high-performance deep learning inference optimizer and runtime that delivers low latency, high-throughput inference for deep learning applications.
deep learning torch 20180202 
Website Torch is a scientific computing framework with wide support for machine learning algorithms that puts GPUs first.
graph computing bliss 0.73
Website A tool for computing automorphism groups and canonical forms of graphs.
lib opencv 3.3.0  
4.5.2 
4.5.5 
4.7.0 
4.9.0 
Website OpenCV (Open Source Computer Vision Library) is an open source computer vision and machine learning software library.
linear algebra armadillo 8.200.1
Website Armadillo is a high quality linear algebra library (matrix maths) for the C++ language, aiming towards a good balance between speed and ease of use.
linear algebra cusparselt 0.2.0.1 
Website NVIDIA cuSPARSELt is a high-performance CUDA library for sparse matrix-matrix multiplication.
machine learning py-accelerate 0.29.3_py312
Website Hugging Face Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration.
machine learning py-datasets 2.18.0_py312
Website Hugging Face Datasets is a library for easily accessing and sharing datasets for Audio, Computer Vision, and Natural Language Processing (NLP) tasks.
machine learning py-huggingface-hub 0.22.1_py312
Website The huggingface_hub library allows you to interact with the Hugging Face Hub, a machine learning platform for creators and collaborators.
machine learning py-kaolin 0.15.0_py39
Website A PyTorch Library for Accelerating 3D Deep Learning Research.
machine learning py-safetensors 0.4.2_py312
Website Simple, safe way to store and distribute tensors.
machine learning py-scikit-learn 0.19.1_py27 
0.19.1_py36
0.24.2_py36
1.0.2_py39
1.3.2_py39
Website Scikit-learn is a free software machine learning library for the Python programming language.
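A minimal scikit-learn sketch, assuming one of the py-scikit-learn modules above is loaded:
```python
# Minimal scikit-learn sketch: fit a classifier on a bundled dataset and score it.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=200).fit(X, y)
print(clf.score(X, y))   # training accuracy
```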
machine learning py-tinygrad 0.8.0_py312
Website tinygrad is a deep learning framework that aims to provide a balance between simplicity and functionality.
machine learning py-tokenizers 0.15.2_py312
Website Hugging Face Tokenizers provides an implementation of today’s most used tokenizers, with a focus on performance and versatility.
machine learning py-torch-nvidia-apex 23.08_py312
Website A PyTorch Extension: Tools for easy mixed precision and distributed training in Pytorch.
machine learning py-torchtune 0.1.1_py312
Website torchtune is a PyTorch-native library for easily authoring, fine-tuning and experimenting with LLMs.
machine learning py-transformers 4.39.1_py312
Website Hugging Face Transformers provides APIs and tools to easily download and train state-of-the-art pretrained models.
numerical analysis matlab R2022b  
R2017a 
R2017b 
R2018a 
R2019a 
R2020a 
R2024a 
Website MATLAB is a multi-paradigm numerical computing environment and proprietary programming language developed by MathWorks.
numerical analysis octave 4.2.1
Website GNU Octave is a high-level language primarily intended for numerical computations.
numerical library arpack 3.5.0
3.7.0 
3.9.0 
Website Collection of Fortran77 subroutines designed to solve large scale eigenvalue problems.
numerical library blis 2.1
2.2.4
3.1.0
Website BLIS is a portable software framework for instantiating high-performance BLAS-like dense linear algebra libraries.
numerical library fftw 2.1.5
3.3.6 
3.3.8 
3.3.9
3.3.10 
Website The Fastest Fourier Transform in the West (FFTW) is a software library for computing discrete Fourier transforms (DFTs).
numerical library flexiblas 3.1.3
Website FlexiBLAS is a BLAS and LAPACK wrapper library with runtime exchangeable backends.
numerical library flint 2.9.0
Website FLINT is a C library for doing number theory.
numerical library glpk 4.63
Website The GLPK (GNU Linear Programming Kit) package is intended for solving large-scale linear programming (LP), mixed integer programming (MIP), and other related problems.
numerical library gmp 6.1.2
6.2.1
Website GMP is a free library for arbitrary precision arithmetic, operating on signed integers, rational numbers, and floating-point numbers.
numerical library gsl 1.16
2.3
2.7
Website The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. The library provides a wide range of mathematical routines such as random number generators, special functions and least-squares fitting.
numerical library harminv 1.4.1
Website harminv is a program designed to solve the problem of harmonic inversion: given a time series consisting of a sum of sinusoids (modes), extract their frequencies and amplitudes.
numerical library hypre 2.20.0 
Website HYPRE is a library of high performance preconditioners and solvers featuring multigrid methods for the solution of large, sparse linear systems of equations on massively parallel computers.
numerical library imkl 2017.u2
2018.u1
2018
2019
Website Intel Math Kernel Library (Intel MKL) is a library of optimized math routines for science, engineering, and financial applications. Core math functions include BLAS, LAPACK, ScaLAPACK, sparse solvers, fast Fourier transforms, and vector math. The routines in MKL are hand-optimized specifically for Intel processors.
numerical library libflame 2.1
2.2.4
3.1.0
Website libflame is a portable library for dense matrix computations, providing much of the functionality present in LAPACK.
numerical library libxsmm 1.8.1
1.17
Website LIBXSMM is a library for small dense and small sparse matrix-matrix multiplications as well as for deep learning primitives such as small convolutions.
numerical library metis 5.1.0
Website METIS is a set of serial programs for partitioning graphs, partitioning finite element meshes, and producing fill reducing orderings for sparse matrices.
numerical library mpc 1.2.1
Website GNU MPC is a C library for the arithmetic of complex numbers with arbitrarily high precision and correct rounding of the result.
numerical library mpfr 3.1.5
4.1.0
Website The MPFR library is a C library for multiple-precision floating-point computations with correct rounding.
numerical library mumps 5.1.2
Website A parallel sparse direct solver.
numerical library openblas 0.3.10 
0.2.19
0.3.4
0.3.9
0.3.20
0.3.26
Website OpenBLAS is an optimized BLAS library.
numerical library parmetis 4.0.3 
Website ParMETIS is an MPI-based parallel library that implements a variety of algorithms for partitioning unstructured graphs, meshes, and for computing fill-reducing orderings of sparse matrices.
numerical library petsc 3.10.3 
3.18.5 
Website PETSc, the Portable, Extensible Toolkit for Scientific Computation, is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations.
numerical library py-autograd 1.0_py39 
Website Autograd can automatically differentiate native Python and Numpy code.
numerical library py-cupy 7.8.0_py36 
10.2.0_py39 
12.1.0_py39 
Website CuPy is an implementation of NumPy-compatible multi-dimensional array on CUDA.
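A minimal CuPy sketch, assuming a py-cupy module is loaded and the job runs on a GPU node:
```python
# Minimal CuPy sketch: NumPy-style array math executed on the GPU.
import cupy as cp

x = cp.arange(10) ** 2
print(cp.asnumpy(x.sum()))   # 285, copied back to the host
```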
numerical library py-gmpy2 2.0.8_py36
Website gmpy2 is a C-coded Python extension module that supports multiple-precision arithmetic.
numerical library py-jax 0.4.7_py39
Website JAX is Autograd and XLA, brought together for high-performance numerical computing.
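A minimal JAX sketch, assuming the py-jax and py-jaxlib modules are loaded:
```python
# Minimal JAX sketch: automatic differentiation of a scalar function.
import jax
import jax.numpy as jnp

grad_tanh = jax.grad(jnp.tanh)
print(grad_tanh(0.5))   # equals 1 - tanh(0.5)**2
```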
numerical library py-jaxlib 0.4.7_py39
Website XLA library for Jax.
numerical library py-numpy 1.14.3_py27 
1.14.3_py36
1.17.2_py36
1.18.1_py36
1.19.2_py36
1.20.3_py39
1.24.2_py39
1.26.3_py312
Website NumPy is the fundamental package for scientific computing with Python.
numerical library py-petsc4py 3.18.5_py39
Website Python bindings for PETSc, the Portable, Extensible Toolkit for Scientific Computation.
numerical library py-psbody-mesh 0.4_py39
Website The MPI-IS Mesh Processing Library contains core functions for manipulating meshes and visualizing them.
numerical library py-pyublas 2017.1_py27
Website PyUblas provides a seamless glue layer between Numpy and Boost.Ublas for use with Boost.Python.
numerical library py-pywavelets 1.6.0_py39
1.6.0_py312
Website PyWavelets is a free Open Source library for wavelet transforms in Python.
numerical library py-scipy 1.1.0_py27 
1.1.0_py36
1.4.1_py36
1.6.3_py39
1.10.1_py39
1.12.0_py312
Website The SciPy library provides many user-friendly and efficient numerical routines such as routines for numerical integration and optimization.
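A minimal SciPy sketch, assuming one of the py-scipy modules above is loaded:
```python
# Minimal SciPy sketch: numerical integration and scalar root finding.
import numpy as np
from scipy import integrate, optimize

area, _ = integrate.quad(np.sin, 0, np.pi)   # ~2.0
root = optimize.brentq(np.cos, 0, 2)         # ~pi/2
print(area, root)
```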
numerical library py-slepc4py 3.18.2_py39
Website Python bindings for SLEPc.
numerical library py-tabmat 3.1.2_py39
Website Efficient matrix representations for working with tabular data.
numerical library qrupdate 1.1.2
Website qrupdate is a Fortran library for fast updates of QR and Cholesky decompositions.
numerical library scalapack 2.0.2 
2.1 
2.2.0 
Website ScaLAPACK is a library of high-performance linear algebra routines for parallel distributed memory machines.
numerical library scotch 6.0.4 
Website Software package and libraries for sequential and parallel graph partitioning, static mapping and clustering, sequential mesh and hypergraph partitioning, and sequential and parallel sparse matrix block ordering.
numerical library slepc 3.18.2 
Website SLEPc is a Scalable Library for Eigenvalue Problem Computations.
numerical library suitesparse 7.4.0 
Website SuiteSparse is a suite of sparse matrix algorithms.
numerical library superlu 5.2.1 
Website SuperLU is a general purpose library for the direct solution of large, sparse, nonsymmetric systems of linear equations.
numerical library tetgen 1.6.0
Website TetGen provides various features to generate good quality and adaptive tetrahedral meshes suitable for numerical methods, such as finite element or finite volume methods.
numerical library xblas 1.0.248
Website Extra precise basic linear algebra subroutines.
optimization gurobi 7.5.1
8.0.1_py27
8.0.1_py36
9.0.3_py36
10.0.1_py39
Website The Gurobi Optimizer is a commercial optimization solver for mathematical programming.
optimization knitro 10.3.0 
12.4.0
Website Artelys Knitro is an optimization solver for difficult large-scale nonlinear problems.
optimization nlopt 2.6.2
Website NLopt is a free/open-source library for nonlinear optimization.
optimization octeract 3.3.0
Website Octeract Engine is a proprietary massively parallel deterministic global optimization solver for general Mixed-Integer Nonlinear Programs (MINLP).
optimization py-optuna 2.10.0_py39
Website Optuna is an automatic hyperparameter optimization software framework, particularly designed for machine learning.
optimization sundials 6.4.1
Website SUNDIALS is a family of software packages providing robust and efficient time integrators and nonlinear solvers that can easily be incorporated into existing simulation codes.
scientific computing py-scipystack 1.0_py27 
1.0_py36
Website The SciPy Stack is a collection of open source software for scientific computing in Python. It provides the following packages: numpy, scipy, matplotlib, ipython, jupyter, pandas, sympy and nose.
statistics datamash 1.3
Website GNU datamash is a command-line program which performs basic numeric, textual and statistical operations on input textual data files.
statistics jags 4.3.0
4.3.1
Website Just another Gibbs sampler (JAGS) is a program for simulation from Bayesian hierarchical models using Markov chain Monte Carlo (MCMC).
statistics py-emcee 3.1.4_py39
Website The Python ensemble sampling toolkit for affine-invariant MCMC.
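A minimal emcee sketch, assuming the py-emcee module is loaded; the target here is a toy 1-D standard Gaussian:
```python
# Minimal emcee sketch: sample a 1-D standard Gaussian with an ensemble sampler.
import numpy as np
import emcee

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)   # log-density of N(0, 1), up to a constant

sampler = emcee.EnsembleSampler(nwalkers=8, ndim=1, log_prob_fn=log_prob)
sampler.run_mcmc(np.random.randn(8, 1), 500)
print(sampler.get_chain(flat=True).std())   # close to 1.0
```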
statistics py-glum 2.1.2_py39
Website glum is a fast, modern, Python-first GLM estimation library.
statistics py-rpy2 2.8.6_py27
2.9.2_py36
Website rpy2 is an interface to R running embedded in a Python process.
statistics R 4.2.0 
3.4.0
3.5.1
3.6.1
4.0.2
4.1.2
4.3.2
Website R is a free software environment for statistical computing and graphics.
statistics rstudio 1.3.1093 
2023.09.1
Website RStudio is an integrated development environment (IDE) for R. It includes a console, syntax-highlighting editor that supports direct code execution, as well as tools for plotting, history, debugging and workspace management.
statistics rstudio-desktop 2022.02.2-485
Website RStudio is an integrated development environment (IDE) for R. It includes a console, syntax-highlighting editor that supports direct code execution, as well as tools for plotting, history, debugging and workspace management. This is the X11/GUI version.
statistics sas 9.4 
Website SAS is a software suite developed by SAS Institute for advanced analytics, multivariate analyses, business intelligence, data management, and predictive analytics.
statistics stata 15  
14 
16 
17 
18 
Website Stata is a complete, integrated statistical software package that provides everything you need for data analysis, data management, and graphics.
symbolic libmatheval 1.1.11
Website GNU libmatheval is a library (callable from C and Fortran) to parse and evaluate symbolic expressions input as text.
symbolic maxima 5.47.0
Website Maxima is a system for the manipulation of symbolic and numerical expressions.
symbolic py-pysr 0.12.3_py39
Website High-Performance Symbolic Regression in Python and Julia.
symbolic py-sympy 1.1.1_py27
1.1.1_py36
1.11.1_py39
Website SymPy is a Python library for symbolic mathematics.
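A minimal SymPy sketch, assuming one of the py-sympy modules above is loaded:
```python
# Minimal SymPy sketch: symbolic differentiation.
import sympy as sp

x = sp.symbols("x")
print(sp.diff(sp.sin(x) * sp.exp(x), x))   # exp(x)*sin(x) + exp(x)*cos(x)
```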
technical computing mathematica 13.1.0 
Website A symbolic language and platform for modern technical computing.
topic modelling py-gensim 4.2.0_py39
Website Gensim is a Python library for topic modelling, document indexing and similarity retrieval with large corpora.

physics

Field Module name Version(s) URL Description
astronomy cfitsio 4.0.0
Website FITSIO is a library of C and Fortran subroutines for reading and writing data files in FITS (Flexible Image Transport System) data format.
astronomy heasoft 6.22.1
6.26.1
Website HEAsoft is a Unified Release of the FTOOLS (General and mission-specific tools to manipulate FITS files) and XANADU (High-level, multi-mission tasks for X-ray astronomical spectral, timing, and imaging data analysis) software packages.
astronomy py-astropy 4.0.1_py36
Website The Astropy Project is a community effort to develop a common core package for Astronomy in Python and foster an ecosystem of interoperable astronomy packages.
astronomy py-lenstools 1.0_py36
Website This python package collects together a suite of widely used analysis tools in Weak Gravitational Lensing.
astronomy py-namaster 1.2.2_py36
Website NaMaster is a C library, Python module and standalone program to compute full-sky angular cross-power spectra of masked fields with arbitrary spin and an arbitrary number of known contaminants using a pseudo-Cl (aka MASTER) approach.
CFD su2 7.0.3
Website SU2: An Open-Source Suite for Multiphysics Simulation and Design
climate modeling fre-nctools 2022.01
Website FRE-NCtools is a collection of tools to help with the creation and manipulation of netCDF files used for climate modeling.
climate modeling cdo 1.9.7.1
2.1.1
Website CDO is a collection of command line Operators to manipulate and analyse Climate and NWP model Data.
geophysics opensees 2.5.0 
Website OpenSees is a software framework for developing applications to simulate the performance of structural and geotechnical systems subjected to earthquakes.
geoscience gdal 3.4.1 
2.2.1
3.5.2
Website GDAL is a translator library for raster and vector geospatial data formats.
geoscience geos 3.6.2 
3.11.0
3.12.1
Website GEOS (Geometry Engine - Open Source) is a C++ port of Java Topology Suite (JTS).
geoscience geosx 0.2.0-20220523  
Website GEOSX is a simulation framework for modeling coupled flow, transport, and geomechanics in the subsurface.
geoscience gmtsar 6.2.2
Website An InSAR processing system based on GMT (Generic Mapping Tools).
geoscience proj 8.2.1 
4.9.3
9.1.0
Website PROJ is a generic coordinate transformation software that transforms geospatial coordinates from one coordinate reference system (CRS) to another.
geoscience py-gdal-utils 3.4.1_py39
Website gdal-utils is the GDAL Python Utilities distribution.
geoscience py-opendrift 1.0.3_py27
Website OpenDrift is software for modeling the trajectories and fate of objects or substances drifting in the ocean, or even in the atmosphere.
geoscience py-pyproj 1.9.5.1_py27 
1.9.5.1_py36
3.4.0_py39
Website Python interface to PROJ4 library for cartographic transformations.
geoscience swash 9.01a 
Website SWASH (an acronym of Simulating WAves till SHore) is a non-hydrostatic wave-flow model.
geoscience udunits 2.2.26
Website The UDUNITS package from Unidata is a C-based package for the programmatic handling of units of physical quantities.
lib libgdsii 0.21
Website libGDSII is a C++ library and command-line utility for reading GDSII geometry files.
magnetism mumax 3.10 
Website mumax3 is a GPU-accelerated micromagnetic simulation program.
materials science atat 3.36
Website Alloy Theoretic Automated Toolkit: a software toolkit for modeling coupled configurational and vibrational disorder in alloy systems.
materials science py-megnet 1.3.0_py39 
Website The MatErials Graph Network (MEGNet) is an implementation of DeepMind's graph networks for universal machine learning in materials science.
materials science py-pymatgen 2022.5.26_py39
Website Pymatgen (Python Materials Genomics) is a robust, open-source Python library for materials analysis.
micromagnetics oommf 1.2b4
Website OOMMF is a set of portable, extensible public-domain micromagnetic programs and associated tools.
particle openmc 0.10.0
Website OpenMC is a Monte Carlo particle transport simulation code focused on neutron criticality calculations.
photonics meep 1.3 
1.4.3 
1.24.0 
Website Meep is a free finite-difference time-domain (FDTD) simulation software package to model electromagnetic systems.
photonics mpb 1.5 
1.6.2 
1.11.1 
Website MPB is a free software package for computing the band structures, or dispersion relations, and electromagnetic modes of periodic dielectric structures, on both serial and parallel computers.
quantum information science cuquantum 22.03.0.40 
Website NVIDIA cuQuantum is an SDK of optimized libraries and tools for accelerating quantum computing workflows.
quantum information science py-cuquantum-python 22.3.0_py39
Website NVIDIA cuQuantum Python provides Python bindings and high-level object-oriented models for accessing the full functionalities of NVIDIA cuQuantum SDK from Python.
quantum mechanics py-quspin 0.3.5_py36
Website QuSpin is an open-source Python package for exact diagonalization and quantum dynamics of arbitrary boson, fermion and spin many-body systems.
quantum mechanics py-qutip 4.5.2_py36
Website QuTiP is open-source software for simulating the dynamics of closed and open quantum systems.

system

Field Module name Version(s) URL Description
backup restic 0.9.5
0.12.1
0.16.3
Website Fast, secure, efficient backup program.
benchmark hp2p 3.2 
Website Heavy Peer To Peer: an MPI-based benchmark for network diagnostic.
benchmark mpibench 20190729 
Website Times MPI collectives over a series of message sizes.
benchmark mprime 29.4
Website mprime is used by GIMPS, a distributed computing project dedicated to finding new Mersenne prime numbers, and is commonly used as a stability testing utility.
benchmark osu-micro-benchmarks 5.6.1 
5.6.3  
5.7  
5.9  
Website The OSU MicroBenchmarks carry out a variety of message passing performance tests using MPI.
benchmark py-linktest 2.1.19_py39 
Website LinkTest is a communication API benchmarking tool that tests point-to-point connections.
checkpointing dmtcp 2.6.0
Website DMTCP (Distributed MultiThreaded Checkpointing) transparently checkpoints a single-host or distributed computation in user-space -- with no modifications to user code or to the O/S.
cloud interface aws-cli 2.0.50
Website This package provides a unified command line interface to Amazon Web Services.
cloud interface google-cloud-sdk 400.0.0
448.0.0
Website Command-line interface for Google Cloud Platform products and services.
cloud interface s5cmd 2.0.0
Website Parallel S3 and local filesystem execution tool.
cloud interface steampipe 0.14.6
Website Steampipe is an open source tool for querying cloud APIs in a universal way and reasoning about the data in SQL.
compiler mrc 1.3.3
Website MRC is a resource compiler that can create self-contained applications, by including all the required data inside executable files.
compression libarchive 3.3.2
3.4.2
3.5.2
Website The libarchive project develops a portable, efficient C library that can read and write streaming archives in a variety of formats.
compression libzip 1.5.1
Website libzip is a C library for reading, creating, and modifying zip archives.
compression lz4 1.8.0
Website LZ4 is a lossless compression algorithm.
compression lzo 2.10
Website LZO is a portable lossless data compression library written in ANSI C.
compression mpibzip2 0.6 
Website MPIBZIP2 is a parallel implementation of the bzip2 block-sorting file compressor that uses MPI and achieves significant speedup on cluster machines.
compression p7zip 16.02
Website p7zip is a Linux port of 7zip, a file archiver with high compression ratio.
compression pbzip2 1.1.12
Website PBZIP2 is a parallel implementation of the bzip2 block-sorting file compressor that uses pthreads and achieves near-linear speedup on SMP machines.
compression pigz 2.4
Website A parallel implementation of gzip for modern multi-processor, multi-core machines.
compression szip 2.1.1
Website Szip compression software, providing lossless compression of scientific data, is an implementation of the extended-Rice lossless compression algorithm.
compression xz 5.2.3
Website XZ Utils, the successor to LZMA Utils, is free general-purpose data compression software with a high compression ratio.
compression zlib 1.2.11
Website zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system.
compression zstd 1.5.2
Website Zstandard, or zstd, is a fast lossless compression algorithm, targeting real-time compression scenarios at zlib-level and better compression ratios.
containers libnvidia-container 1.0.0rc2 
Website libnvidia-container is a library and a simple CLI utility to automatically configure GNU/Linux containers leveraging NVIDIA hardware.
containers proot 5.2.0 
5.1.0
Website PRoot is a user-space implementation of chroot, mount --bind, and binfmt_misc.
containers py-spython 0.3.13_py39
0.3.13_py312
Website Singularity Python (spython) is the Python API for working with Singularity containers.
database bdb 6.2.32
Website Berkeley DB (BDB) is a software library intended to provide a high-performance embedded database for key/value data.
database mariadb 10.2.11 
10.6.9
Website MariaDB is a community-developed fork of the MySQL relational database management system intended to remain free under the GNU GPL.
database postgresql 10.5
14.5
Website PostgreSQL is a powerful, open source object-relational database system with a strong focus on reliability, feature robustness, and performance.
database sqlite 3.18.0
3.37.2
3.44.2
Website SQLite is a self-contained, high-reliability, embedded, full-featured, public-domain, SQL database engine.
database sqliteodbc 0.9998
Website ODBC driver for SQLite.
database unixodbc 2.3.9
Website unixODBC is an open-source project that implements the ODBC API.
document management pandoc 2.7.3
Website Pandoc is a universal document converter.
document processing ghostscript 9.53.2
Website Ghostscript is an interpreter for the PostScript language and PDF files.
document processing groff 1.23.0
Website groff (GNU roff) is a typesetting system that reads plain text input files that include formatting commands to produce output in PostScript, PDF, HTML, or DVI formats or for display to a terminal.
document processing lyx 2.3.2
Website LyX is a document processor.
document processing poppler 0.47.0
Website Poppler is a PDF rendering library.
document processing texinfo 6.6
Website Texinfo is the official documentation format of the GNU project.
document processing texlive 2019
Website TeX Live is an easy way to get up and running with the TeX document production system.
file management dua-cli 2.20.1
Website dua (-> Disk Usage Analyzer) is a tool to conveniently learn about the usage of disk space of a given directory.
file management duc 1.4.4
Website Duc is a collection of tools for indexing, inspecting and visualizing disk usage.
file management exa 0.8.0
Website exa is a replacement for ls written in Rust.
file management fdupes 2.2.1
Website FDUPES is a program for identifying or deleting duplicate files residing within specified directories.
file management fpart 0.9.3
Website fpart sorts files and packs them into partitions.
file management midnight-commander 4.8.29
Website GNU Midnight Commander is a visual file manager.
file management ncdu 1.18.1 
1.15.1
2.2.1
Website Ncdu is a disk usage analyzer with an ncurses interface.
file management py-pcircle 0.17_py27 
Website pcircle contains a suite of file system tools developed at OLCF to take advantage of highly scalable parallel file system such as Lustre.
file management rmlint 2.8.0
Website rmlint finds space waste and other broken things on your filesystem and offers to remove it.
file management tdu 1.36
Website tdu estimates the disk space occupied by all files in a given path.
file transfer aria2 1.35.0
Website aria2 is a lightweight multi-protocol & multi-source command-line download utility.
file transfer aspera-cli 3.9.6
Website The IBM Aspera Command-Line Interface (the Aspera CLI) is a collection of Aspera tools for performing high-speed, secure data transfers from the command line.
file transfer lftp 4.8.1
Website LFTP is a sophisticated file transfer program supporting a number of network protocols (ftp, http, sftp, fish, torrent).
file transfer mpifileutils 0.10.1 
0.11 
0.11.1 
Website mpiFileUtils is a suite of MPI-based tools to manage large datasets, which may vary from large directory trees to large files.
file transfer py-globus-cli 1.2.0
1.9.0_py27
1.9.0_py36
3.2.0_py39
3.8.0_py39
3.19.0_py39
Website A command line wrapper over the Globus SDK for Python.
file transfer py-httpie 3.2.1_py39
Website HTTPie is a command-line HTTP client designed for testing, debugging, and generally interacting with APIs and HTTP servers.
file transfer rclone 1.55.1
1.59.1
1.65.0
Website Rclone is a command line program to sync files and directories to and from: Google Drive, Amazon S3, Dropbox, Google Cloud Storage, Amazon Drive, Microsoft One Drive, Hubic, Backblaze B2, Yandex Disk, or the local filesystem.
framework mono 5.12.0.301
5.20.1.19
Website Mono is an open source implementation of Microsoft's .NET Framework based on the ECMA standards for C# and the Common Language Runtime.
hardware hwloc 2.7.0
2.9.3
Website The Portable Hardware Locality (hwloc) software package provides a portable abstraction of the hierarchical topology of modern architectures.
hardware libpciaccess 0.16
Website Generic PCI access library.
job management slurm-drmaa 1.1.2
Website DRMAA for Slurm Workload Manager (Slurm) is an implementation of Open Grid Forum Distributed Resource Management Application API (DRMAA) version 1 for submission and control of jobs to Slurm.
language tcltk 8.6.6
Website Tcl (Tool Command Language) is a dynamic programming language, suitable for web and desktop applications, networking, administration, testing. Tk is a graphical user interface toolkit.
libs apr 1.6.3
Website The Apache Portable Runtime is a supporting library for the Apache web server. It provides a set of APIs that map to the underlying operating system.
libs apr-util 1.6.1
Website The Apache Portable Runtime is a supporting library for the Apache web server. It provides a set of APIs that map to the underlying operating system.
libs atk 2.24.0
Website ATK is the Accessibility Toolkit. It provides a set of generic interfaces allowing accessibility technologies such as screen readers to interact with a graphical user interface.
libs benchmark 1.2.0
Website A microbenchmark support library.
libs cairo 1.14.10
Website Cairo is a 2D graphics library with support for multiple output devices.
libs cups 2.2.4
Website CUPS is the standards-based, open source printing system.
libs dbus 1.10.22
Website D-Bus is a message bus system, a simple way for applications to talk to one another.
libs enchant 1.6.1
2.2.3
Website Enchant is a library (and command-line program) that wraps a number of different spelling libraries and programs with a consistent interface.
libs fltk 1.3.4
Website FLTK (pronounced 'fulltick') is a cross-platform C++ GUI toolkit.
libs fontconfig 2.12.4
Website Fontconfig is a library for configuring and customizing font access.
libs freeglut 3.0.0
Website FreeGLUT is a free-software/open-source alternative to the OpenGL Utility Toolkit (GLUT) library.
libs freetype 2.8.1
2.9.1
Website FreeType is a software font engine that is designed to be small, efficient, highly customizable, and portable while capable of producing high-quality output (glyph images).
libs fribidi 1.0.12
Website The Free Implementation of the Unicode Bidirectional Algorithm.
libs ftgl 2.1.2
Website FTGL is a free cross-platform Open Source C++ library that uses Freetype2 to simplify rendering fonts in OpenGL applications.
libs gc 7.6.0
Website The Boehm-Demers-Weiser conservative garbage collector can be used as a garbage collecting replacement for C malloc or C++ new.
libs gconf 2.9.91
Website GConf is a system for storing application preferences.
libs gdk-pixbuf 2.36.8
Website The GdkPixbuf library provides facilities for loading images in a variety of file formats.
libs gflags 2.2.1
2.2.2
Website The gflags package contains a C++ library that implements commandline flags processing.
libs giflib 5.1.4
Website GIFLIB is a package of portable tools and library routines for working with GIF images.
libs glib 2.52.3
Website The GLib library provides core non-graphical functionality such as high level data types, Unicode manipulation, and an object and type system to C programs.
libs glog 0.3.5
Website C++ implementation of the Google logging module.
libs gnutls 3.5.9
Website GnuTLS is a secure communications library implementing the SSL, TLS and DTLS protocols and technologies around them.
libs gobject-introspection 1.52.1
Website GObject introspection is a middleware layer between C libraries (using GObject) and language bindings.
libs googletest 1.8.0
Website Google Test is Google's C++ test framework.
libs gstreamer 1.12.0
Website GStreamer is a library for constructing graphs of media-handling components.
libs gtk+ 2.24.30
3.22.18
Website GTK+, or the GIMP Toolkit, is a multi-platform toolkit for creating graphical user interfaces.
libs harfbuzz 1.4.8
Website HarfBuzz is an OpenType text shaping engine.
libs hunspell 1.6.2
Website Hunspell is a spell checker.
libs hyphen 2.8.8
Website Hyphen is a hyphenation library to use converted TeX hyphenation patterns.
libs icu 59.1
Website ICU is a set of C/C++ and Java libraries providing Unicode and Globalization support for software applications.
libs jansson 2.13.1
Website C library for encoding, decoding and manipulating JSON data.
libs jemalloc 5.3.0
Website jemalloc is a general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
libs json-glib 1.4.4
Website JSON-GLib is a library providing serialization and deserialization support for the JavaScript Object Notation (JSON) format described by RFC 4627.
libs leptonica 1.82.0
Website Leptonica is an open source library containing software that is broadly useful for image processing and image analysis applications.
libs libaio 0.3.111
Website libaio provides the Linux-native API for async I/O.
libs libart_lgpl 2.3.21
Website Libart is a library for high-performance 2D graphics.
libs libcroco 0.6.13
Website Libcroco is a standalone css2 parsing and manipulation library.
libs libepoxy 1.4.1
Website Epoxy is a library for handling OpenGL function pointer management for you.
libs libexif 0.6.21
Website A library for parsing, editing, and saving EXIF data.
libs libffi 3.2.1
Website libffi is a portable Foreign Function Interface library.
libs libgcrypt 1.8.2
Website Libgcrypt is a general purpose cryptographic library originally based on code from GnuPG.
libs libgd 2.2.5
Website GD is an open source code library for the dynamic creation of images by programmers.
libs libgdiplus 5.6
Website C-based implementation of the GDI+ API.
libs libglvnd 1.2.0
Website libglvnd is a vendor-neutral dispatch layer for arbitrating OpenGL API calls between multiple vendors.
libs libgnomecanvas 2.30.3
Website Library for the GNOME canvas, an engine for structured graphics that offers a rich imaging model, high performance rendering, and a powerful, high-level API.
libs libgpg-error 1.27
Website Libgpg-error is a small library that originally defined common error values for all GnuPG components.
libs libiconv 1.16
Website libiconv is a conversion library for string encoding.
libs libidl 0.8.14
Website The libIDL package contains libraries for Interface Definition Language files. This is a specification for defining portable interfaces.
libs libjpeg-turbo 1.5.1 
2.1.4
Website libjpeg-turbo is a JPEG image codec that uses SIMD instructions (MMX, SSE2, AVX2, NEON, AltiVec) to accelerate baseline JPEG compression and decompression on x86, x86-64, ARM, and PowerPC systems.
libs libmng 2.0.3
Website THE reference library for reading, displaying, writing and examining Multiple-Image Network Graphics. MNG is the animation extension to the popular PNG image-format.
libs libpng 1.2.57
1.6.29
Website libpng is the official PNG reference library. It supports almost all PNG features, is extensible, and has been extensively tested for over 20 years.
libs libproxy 0.4.15
Website libproxy is a library that provides automatic proxy configuration management.
libs libressl 2.5.3
3.2.1
Website LibreSSL is a version of the TLS/crypto stack forked from OpenSSL in 2014, with goals of modernizing the codebase, improving security, and applying best practice development processes.
libs librsvg 2.36.4
Website Librsvg is a library to render SVG files using cairo as a rendering engine.
libs libseccomp 2.3.3
Website The libseccomp library provides an easy-to-use, platform-independent interface to the Linux kernel's syscall filtering mechanism.
libs libsodium 1.0.18
Website Sodium is a modern, easy-to-use software library for encryption, decryption, signatures, password hashing and more.
libs libsoup 2.61.2
Website libsoup is an HTTP client/server library for GNOME.
libs libtasn1 4.13
Website Libtasn1 is the ASN.1 library used by GnuTLS, p11-kit and some other packages.
libs libtiff 4.0.8 
4.4.0
4.5.0
Website libtiff provides support for the Tag Image File Format (TIFF), a widely used format for storing image data.
libs libunistring 0.9.7
Website Libunistring provides functions for manipulating Unicode strings and for manipulating C strings according to the Unicode standard.
libs libuuid 1.0.3
Website Portable uuid C library.
libs libuv 1.38.1
Website libuv is a multi-platform support library with a focus on asynchronous I/O.
libs libwebp 0.6.1
Website WebP is a modern image format that provides superior lossless and lossy compression for images on the web.
libs libxkbcommon 0.9.1
Website libxkbcommon is a keyboard keymap compiler and support library which processes a reduced subset of keymaps as defined by the XKB (X Keyboard Extension) specification.
libs libxml2 2.9.4
Website Libxml2 is a XML C parser and toolkit.
libs libxslt 1.1.32
Website Libxslt is the XSLT C library developed for the GNOME project. XSLT itself is an XML language for defining transformations of XML documents.
libs mesa 17.1.6
Website Mesa is an open-source implementation of the OpenGL, Vulkan and other specifications.
libs minipmi 1.0
Website Implementation of a minimal subset of the PMI1 and PMI2 specifications.
libs ncurses 6.0
6.4
Website The ncurses (new curses) library is a free software emulation of curses in System V Release 4.0 (SVr4), and more.
libs nettle 3.3
Website Nettle is a cryptographic library that is designed to fit easily in more or less any context.
libs openjpeg 2.3.1
Website OpenJPEG is an open-source JPEG 2000 codec written in C language.
libs openssl 3.0.7
Website OpenSSL is a full-featured toolkit for general-purpose cryptography and secure communication.
libs orbit 2.14.19
Website ORBit2 is a CORBA 2.4-compliant Object Request Broker (ORB) featuring mature C, C++ and Python bindings.
libs pango 1.40.10
Website Pango is a library for laying out and rendering of text, with an emphasis on internationalization.
libs pcre 8.40
Website The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5.
libs pcre2 10.35
10.40
Website The PCRE2 library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5.
libs popt 1.16
Website Library for parsing command line options.
libs py-lmdb 0.93
Website Universal Python binding for the LMDB 'Lightning' Database.
libs py-mako 1.0.7_py27 
1.0.7_py36
Website Mako is a template library written in Python. It provides a familiar, non-XML syntax which compiles into Python modules for maximum performance.
libs py-pygobject 3.32.2_py36
Website PyGObject is a Python package which provides bindings for GObject based libraries such as GTK, GStreamer, WebKitGTK, GLib, GIO and many more.
libs py-pyopengl 3.1.5_py39
Website Standard OpenGL bindings for Python.
libs py-pyqt5 5.9.1_py36
Website PyQt5 is a comprehensive set of Python bindings for Qt v5.
libs readline 7.0
8.2
Website The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in.
libs serf 1.3.9
Website The serf library is a high performance C-based HTTP client library built upon the Apache Portable Runtime (APR) library.
libs sionlib 1.7.7 
Website Scalable I/O library for parallel access to task-local files.
libs snappy 1.1.7
Website A fast compressor/decompressor.
libs talloc 2.1.14
Website talloc is a hierarchical, reference counted memory pool system with destructors.
libs tesseract 5.1.0
Website Tesseract is an open source text recognition (OCR) Engine.
libs utf8proc 2.4.0
Website utf8proc is a small, clean C library that provides Unicode normalization, case-folding, and other operations for data in the UTF-8 encoding.
libs webkitgtk 2.28.4
Website WebKitGTK is a full-featured port of the WebKit rendering engine, suitable for projects requiring any kind of web integration, from hybrid HTML/CSS applications to full-fledged web browsers.
libs wxwidgets 3.0.4
Website wxWidgets is a C++ library that lets developers create applications for Windows, macOS, Linux and other platforms with a single code base.
libs yaml-cpp 0.7.0
Website yaml-cpp is a YAML parser and emitter in C++ matching the YAML 1.2 spec.
media ffmpeg 4.0
4.2.1
5.0
Website FFmpeg is the leading multimedia framework, able to decode, encode, transcode, mux, demux, stream, filter and play pretty much anything that humans and machines have created.
media libsndfile 1.0.28
Website Libsndfile is a C library for reading and writing files containing sampled sound (such as MS Windows WAV and the Apple/SGI AIFF format) through one standard library interface.
performance likwid 4.3.2
5.2.1 
Website Likwid is a simple toolsuite of command line applications for performance oriented programmers.
resource monitoring nvtop 1.1.0 
2.0.3 
3.0.2 
Website Nvtop stands for NVidia TOP, a (h)top-like task monitor for NVIDIA GPUs.
resource monitoring py-nvitop 1.3.2_py39  
1.3.2_py312 
Website An interactive NVIDIA-GPU process viewer and beyond.
resource monitoring remora 1.8.5
Website Remora is a tool to monitor runtime resource utilization.
resource monitoring ruse 2.0 
Website A command line tool to measure process resource usage.
scm gh 1.9.1
Website gh is GitHub on the command line. It brings pull requests, issues, and other GitHub concepts to the terminal next to where you are already working with git and your code.
scm git 2.39.1
Website Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency.
scm git-annex 8.20210622
Website git-annex allows managing files with git, without checking the file contents into git.
scm git-credential-manager 2.0.696
Website Secure, cross-platform Git credential storage with authentication to GitHub, Azure Repos, and other popular Git hosting services.
scm git-lfs 2.4.0
Website Git Large File Storage (LFS) replaces large files such as audio samples, videos, datasets, and graphics with text pointers inside Git, while storing the file contents on a remote server.
scm libgit2 1.1.0
Website libgit2 is a portable, pure C implementation of the Git core methods provided as a re-entrant linkable library with a solid API.
scm mercurial 4.5.3
Website Mercurial is a free, distributed source control management tool.
scm py-dvc 0.91.1_py36
Website Data Version Control or DVC is an open-source tool for data science and machine learning projects.
scm subversion 1.9.7
1.12.2
Website Subversion is an open source version control system.
shell powershell 7.1.5
Website PowerShell Core is a cross-platform automation and configuration tool/framework.
testing py-pytest 7.1.3_py39
Website pytest is a full-featured Python testing framework.
tool unifdef 2.12
Website The unifdef utility selectively processes conditional C preprocessor #if and #ifdef directives.
tools clinfo 2.2.18.04.06 
Website clinfo is a simple command-line application that enumerates all possible (known) properties of the OpenCL platform and devices available on the system.
tools curl 8.4.0
Website curl is an open source command line tool and library for transferring data with URL syntax.
tools depot_tools 20200731
Website Tools for working with Chromium development.
tools expat 2.2.3
Website Expat is a stream-oriented XML parser library written in C.
tools graphicsmagick 1.3.26
Website GraphicsMagick is the swiss army knife of image processing.
tools imagemagick 7.0.7-2
Website ImageMagick is a free and open-source software suite for displaying, converting, and editing raster image and vector image files.
tools jq 1.6
Website jq is a lightweight and flexible command-line JSON processor.
tools leveldb 1.20
Website LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values.
tools lmdb 0.9.21
Website Symas LMDB is an extraordinarily fast, memory-efficient database we developed for the Symas OpenLDAP Project.
tools motif 2.3.7
Website Motif is the toolkit for the Common Desktop Environment.
tools parallel 20180122
20200822
Website GNU parallel is a shell tool for executing jobs in parallel using one or more computers.
tools password-store 1.7.4
Website Simple password manager using gpg and ordinary unix directories.
tools py-clustershell 1.9.0_py39
Website ClusterShell is an event-driven open source Python library, designed to run local or distant commands in parallel on server farms or on large Linux clusters.
tools py-matlab-proxy 0.9.1_py39
0.10.0_py39
Website matlab-proxy is a Python package which enables you to launch MATLAB and access it from a web browser.
tools py-nvidia-ml-py 12.550.52_py39 
12.550.52_py312 
Website Python bindings to the NVIDIA Management Library.
tools py-pyside 5.15.2.1_py39
Website PySide is the official Python module from the Qt for Python project, which provides access to the complete Qt framework.
tools py-wxpython 4.0.7_py39
4.2.0_py39
Website wxPython is the cross-platform GUI toolkit for the Python language.
tools qt 5.9.1 
6.4.0
Website Qt is a cross-platform application framework that is used for developing application software that can be run on various software and hardware platforms.
tools ripgrep 11.0.1
Website ripgrep recursively searches directories for a regex pattern.
tools rocksdb 5.7.3
Website A library that provides an embeddable, persistent key-value store for fast storage.
tools x11 7.7
Website The X.Org project provides an open source implementation of the X Window System.
tools xkeyboard-config 2.21
Website The architecture-independent keyboard configuration database for the X Window System.

viz#

Field Module name Version(s) URL Description
data ncview 2.1.7
Website Ncview is a visual browser for netCDF format files.
gis gmt 6.4.0
Website GMT (The Generic Mapping Tools) is an open source collection of command-line tools for manipulating geographic and Cartesian data sets.
gis panoply 4.10.8
Website Panoply plots geo-referenced and other arrays from netCDF, HDF, GRIB, and other datasets.
gis py-cartopy 0.21.0_py39
Website Cartopy is a Python package designed for geospatial data processing in order to produce maps and other geospatial data analyses.
graphs graphviz 2.40.1
2.44.1
Website Graphviz is open source graph visualization software.
imaging py-pillow 5.1.0_py27 
5.1.0_py36
7.0.0_py36
8.2.0_py39
9.3.0_py39
10.2.0_py312
Website Pillow is a friendly PIL (Python Imaging Library) fork.
imaging py-pillow-simd 7.0.0.post3_py36
9.2.0_py39
10.2.0_py312
Website Pillow-SIMD is an optimized version of Pillow.
molecular visualization ovito 3.7.11
Website OVITO is a scientific visualization and data analysis solution for atomistic and other particle-based models.
molecular visualization pymol 1.8.6.2 
2.5.3 
Website PyMOL is a Python-enhanced molecular graphics tool.
plotting gnuplot 5.2.0
Website Gnuplot is a portable command-line driven graphing utility for Linux, OS/2, MS Windows, OSX, VMS, and many other platforms.
plotting grace 5.1.25
Website Grace is a WYSIWYG tool to make two-dimensional plots of numerical data.
plotting mathgl 8.0.1
Website MathGL is a library to make high-quality scientific graphics.
plotting py-basemap 1.1.0_py27 
1.1.0_py36
Website The matplotlib basemap toolkit is a library for plotting 2D data on maps in Python.
plotting py-matplotlib 2.2.2_py27 
2.1.2_py27
2.1.2_py36
2.2.2_py36
3.1.1_py36
3.2.1_py36
3.4.2_py39
3.7.1_py39
3.8.3_py312
Website Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms.
plotting py-plotly 2.4.1_py27
5.19.0_py39
5.19.0_py312
Website Plotly's Python graphing library makes interactive, publication-quality graphs online.
plotting py-seaborn 0.12.1_py39
Website Seaborn is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics.
plotting veusz 3.3.1
Website Veusz is a scientific plotting and graphing program with a graphical user interface, designed to produce publication-ready 2D and 3D plots.
remote display virtualgl 2.5.2
Website VirtualGL is an open source toolkit that gives any Unix or Linux remote display software the ability to run OpenGL applications with full 3D hardware acceleration.
\ No newline at end of file diff --git a/docs/software/modules/index.html b/docs/software/modules/index.html new file mode 100644 index 000000000..617aecd1d --- /dev/null +++ b/docs/software/modules/index.html @@ -0,0 +1,155 @@ + Modules - Sherlock

Modules

Environment modules#

Software is provided on Sherlock in the form of loadable environment modules.

Software is only accessible via modules

The use of a module system means that most software is not accessible by default and has to be loaded using the module command. This mechanism allows us to provide multiple versions of the same software concurrently, and lets users easily switch between software versions.

Sherlock uses Lmod to manage software installations. The module system helps set up the user's shell environment to give access to applications, and makes running and compiling software easier. It also allows us to provide multiple versions of the same software, which would otherwise conflict with each other, and to abstract away the OS's sometimes rigid versions and dependencies.

When you first log into Sherlock, you'll be presented with a default, bare-bones environment with minimal software available. The module system is used to manage the user environment and to activate software packages on demand. In order to use software installed on Sherlock, you must first load the corresponding software module.

When you load a module, the system will set or modify your user environment variables to enable access to the software package provided by that module. For instance, the $PATH environment variable might be updated so that appropriate executables for that package can be used.
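For illustration, here's what this might look like when loading a Python module (the module version and output paths are examples and will vary):

$ which python
/usr/bin/python
$ ml python/3.6.1
$ which python
/share/software/user/open/python/3.6.1/bin/python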

Module categories#

Modules on Sherlock are organized by scientific field, in distinct categories. This is to limit the information overload that can result from displaying the full list of available modules. Given the large diversity of the Sherlock user population, not all users are interested in the same kind of software, and high-energy physicists may not want to see their screens cluttered with the latest bioinformatics packages.

Module categories

You will first have to load a category module before getting access to individual modules. The math and devel categories are loaded by default, and modules in those categories can be loaded directly.

For instance, to be able to load the gromacs module, you'll first need to load the chemistry module. This can be done in a single command, by specifying first the category, then the actual application module name:

$ module load chemistry gromacs
+

The math and devel categories, which are loaded by default, provide direct access to compilers, languages, and MPI and numerical libraries.
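For instance, compilers and MPI libraries from those default categories can be loaded directly, without loading a category module first (versions shown here are illustrative, taken from the listings below):

$ ml gcc/7.1.0 openmpi/2.0.2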

For a complete list of software module categories, please refer to the list of available software

Searching for a module

To know how to access a module, you can use the module spider <module_name> command. It will search through all the installed modules, even if they're masked, and display instructions to load them. See the Examples section for details.

Module usage#

The most common module commands are outlined in the following table. module commands may be shortened with the ml alias, with slightly different semantics.

Module names auto-completion

The module command supports auto-completion, so you can just start typing the name of a module, and press Tab to let the shell automatically complete the module name and/or version.

Module command Short version Description
module avail ml av List available software1
module spider gromacs ml spider gromacs Search for particular software
module keyword blas ml key blas Search for blas in module names and descriptions
module whatis gcc ml whatis gcc Display information about the gcc module
module help gcc ml help gcc Display module specific help
module load gcc ml gcc Load a module to use the associated software
module load gsl/2.3 ml gsl/2.3 Load specific version of a module
module unload gcc ml -gcc Unload a module
module swap gcc icc ml -gcc icc Swap a module (unload gcc and replace it with icc)
module purge ml purge Remove all modules2
module save foo ml save foo Save the state of all loaded modules in a collection named foo
module restore foo ml restore foo Restore the state of saved modules from the foo collection
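For instance, collections can be used to capture a known-good set of modules and bring it back in a later session (a minimal sketch; the mdsim collection name is arbitrary):

$ ml chemistry gromacs/2016.3
$ ml save mdsim
[...]
$ ml restore mdsim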

Additional module sub-commands are documented in the module help command. For complete reference, please refer to the official Lmod documentation.

Module properties#

Multiple versions

When multiple versions of the same module exist, module will load the one marked as Default (D). For the sake of reproducibility, we recommend always specifying the module version you want to load, as defaults may evolve over time.

To quickly see some of the modules characteristics, module avail will display colored property attributes next to the module names. The main module properties are:

  • S: Module is sticky, requires --force to unload or purge
  • L: Indicates a currently loaded module
  • D: Default module that will be loaded when multiple versions are available
  • r: Restricted access, typically software under license. Contact us for details
  • g: GPU-accelerated software, will only run on GPU nodes
  • m: Software supports parallel execution using MPI

Searching for modules#

You can search through all the available modules for either:

  • a module name (if you already know it), using module spider
  • any string within modules names and descriptions, using module keyword

For instance, if you want to know how to load the gromacs module, you can do:

$ module spider gromacs
+

If you don't know the module name, or want to list all the modules that contain a specific string of characters in their name or description, you can use module keyword. For instance, the following command will list all the modules providing a BLAS library:

$ module keyword blas
+

Examples#

Listing#

To list all the modules that can be loaded, you can do:

$ ml av
+
+-- math -- numerical libraries, statistics, deep-learning, computer science ---
+   R/3.4.0             gsl/1.16             openblas/0.2.19
+   cudnn/5.1  (g)      gsl/2.3       (D)    py-scipystack/1.0_py27 (D)
+   cudnn/6.0  (g,D)    imkl/2017.u2         py-scipystack/1.0_py36
+   fftw/3.3.6          matlab/R2017a (r)
+
+------------------ devel -- compilers, MPI, languages, libs -------------------
+   boost/1.64.0          icc/2017.u2           python/2.7.13    (D)
+   cmake/3.8.1           ifort/2017.u2         python/3.6.1
+   cuda/8.0.61    (g)    impi/2017.u2   (m)    scons/2.5.1_py27 (D)
+   eigen/3.3.3           java/1.8.0_131        scons/2.5.1_py36
+   gcc/6.3.0      (D)    julia/0.5.1           sqlite/3.18.0
+   gcc/7.1.0             llvm/4.0.0            tbb/2017.u2
+   h5utils/1.12.1        nccl/1.3.4     (g)    tcltk/8.6.6
+   hdf5/1.10.0p1         openmpi/2.0.2  (m)
+
+-------------- categories -- load to make more modules available --------------
+   biology      devel (S,L)    physics    system
+   chemistry    math  (S,L)    staging    viz
+
+  Where:
+   S:  Module is Sticky, requires --force to unload or purge
+   r:  Restricted access
+   g:  GPU support
+   L:  Module is loaded
+   m:  MPI support
+   D:  Default Module
+
+Use "module spider" to find all possible modules.
+Use "module keyword key1 key2 ..." to search for all possible modules matching
+any of the "keys".
+

Searching#

To search for a specific string in modules names and descriptions, you can run:

$ module keyword numpy
+---------------------------------------------------------------------------
+
+The following modules match your search criteria: "numpy"
+---------------------------------------------------------------------------
+
+  py-scipystack: py-scipystack/1.0_py27, py-scipystack/1.0_py36
+    The SciPy Stack is a collection of open source software for scientific
+    computing in Python. It provides the following packages: numpy, scipy,
+    matplotlib, ipython, jupyter, pandas, sympy and nose.
+
+---------------------------------------------------------------------------
+[...]
+$ ml key compiler
+---------------------------------------------------------------------------
+
+The following modules match your search criteria: "compiler"
+---------------------------------------------------------------------------
+
+  cmake: cmake/3.8.1
+    CMake is an extensible, open-source system that manages the build
+    process in an operating system and in a compiler-independent manner.
+
+  gcc: gcc/6.3.0, gcc/7.1.0
+    The GNU Compiler Collection includes front ends for C, C++, Fortran,
+    Java, and Go, as well as libraries for these languages (libstdc++,
+    libgcj,...).
+
+  icc: icc/2017.u2
+    Intel C++ Compiler, also known as icc or icl, is a group of C and C++
+    compilers from Intel
+
+  ifort: ifort/2017.u2
+    Intel Fortran Compiler, also known as ifort, is a group of Fortran
+    compilers from Intel
+
+  llvm: llvm/4.0.0
+    The LLVM Project is a collection of modular and reusable compiler and
+    toolchain technologies. Clang is an LLVM native C/C++/Objective-C
+    compiler,
+
+---------------------------------------------------------------------------
+

To get information about a specific module, especially how to load it, the following command can be used:

$ module spider gromacs
+
+-------------------------------------------------------------------------------
+  gromacs: gromacs/2016.3
+-------------------------------------------------------------------------------
+    Description:
+      GROMACS is a versatile package to perform molecular dynamics, i.e.
+      simulate the Newtonian equations of motion for systems with hundreds to
+      millions of particles.
+
+    Properties:
+      GPU support      MPI support
+
+    You will need to load all module(s) on any one of the lines below before
+    the "gromacs/2016.3" module is available to load.
+
+      chemistry
+

Loading#

Loading a category module gives access to field-specific software:

$ ml chemistry
+$ ml av
+
+------------- chemistry -- quantum chemistry, molecular dynamics --------------
+   gromacs/2016.3 (g,m)    vasp/5.4.1 (g,r,m)
+
+-- math -- numerical libraries, statistics, deep-learning, computer science ---
+   R/3.4.0             gsl/1.16             openblas/0.2.19
+   cudnn/5.1  (g)      gsl/2.3       (D)    py-scipystack/1.0_py27 (D)
+   cudnn/6.0  (g,D)    imkl/2017.u2         py-scipystack/1.0_py36
+   fftw/3.3.6          matlab/R2017a (r)
+
+------------------ devel -- compilers, MPI, languages, libs -------------------
+   boost/1.64.0          icc/2017.u2           python/2.7.13    (D)
+   cmake/3.8.1           ifort/2017.u2         python/3.6.1
+   cuda/8.0.61    (g)    impi/2017.u2   (m)    scons/2.5.1_py27 (D)
+   eigen/3.3.3           java/1.8.0_131        scons/2.5.1_py36
+   gcc/6.3.0      (D)    julia/0.5.1           sqlite/3.18.0
+   gcc/7.1.0             llvm/4.0.0            tbb/2017.u2
+   h5utils/1.12.1        nccl/1.3.4     (g)    tcltk/8.6.6
+   hdf5/1.10.0p1         openmpi/2.0.2  (m)
+
+-------------- categories -- load to make more modules available --------------
+   biology          devel (S,L)    physics    system
+   chemistry (L)    math  (S,L)    staging    viz
+
+[...]
+

Resetting the modules environment#

If you want to reset your modules environment as it was when you initially connected to Sherlock, you can use the ml reset command: it will remove all the modules you have loaded, and restore the original state where only the math and devel categories are accessible.

If you want to remove all modules from your environment, including the default math and devel modules, you can use ml --force purge.
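As a quick sketch of the difference between the two (listing output abbreviated):

$ ml --force purge
$ ml
No modules loaded
$ ml reset
$ ml
Currently Loaded Modules:
  1) math (S)   2) devel (S)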

Loading modules in jobs#

In order for an application running in a Slurm job to have access to any necessary module-provided software packages, we recommend loading those modules in the job script directly. Since Slurm propagates all user environment variables by default, this is not strictly necessary, as jobs will inherit the modules loaded at submission time. But for reproducibility and to avoid surprises, it is preferable to explicitly load the modules in the batch script.

module load commands should be placed right after #SBATCH directives and before the actual executable calls. For instance:

#!/bin/bash
+#SBATCH ...
+#SBATCH ...
+#SBATCH ...
+
+ml reset
+ml load gromacs/2016.3
+
+srun gmx_mpi ...
+

Custom modules#

Users are welcome and encouraged to build and install their own software on Sherlock. To that end, and to facilitate usage or sharing of their custom software installations, they can create their own module repositories.
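As a minimal sketch, a personal module repository is simply a directory of modulefiles that gets prepended to Lmod's search path with module use ($HOME/modulefiles is just an example location):

$ mkdir -p $HOME/modulefiles
$ ml use $HOME/modulefiles
$ ml av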

See the Software Installation page for more details.

Contributed software#

PI groups, labs or departments can share their software installations and modules with the whole Sherlock community of users, and let everyone benefit from their tuning efforts and software developments.

Those modules are available in the specific contribs category, and organized by contributor name.

For instance, listing the available contributed modules can be done with:

$ ml contribs
+$ ml av
+-------------------- contribs -- contributed software ----------------------
+   poldrack
+

To get information about a specific lab module:

$ ml show poldrack
+----------------------------------------------------------------------------
+   /share/software/modules/contribs/poldrack.lua:
+----------------------------------------------------------------------------
+prepend_path("MODULEPATH","/home/groups/russpold/modules")
+whatis("Name:        poldrack")
+whatis("Version:     1.0")
+whatis("Category:    contribs")
+whatis("URL:         https://github.com/poldracklab/lmod_modules")
+whatis("Description: Software modules contributed by the Poldrack Lab.")
+

And to list the available software modules contributed by the lab:

$ ml poldrack
+$ ml av
+
+------------------------ /home/groups/russpold/modules -------------------------
+   afni/17.3.03           freesurfer/6.0.1            gsl/2.3      (D)
+   anaconda/5.0.0-py36    fsl/5.0.9                   pigz/2.4
+   ants/2.1.0.post710     fsl/5.0.11           (D)    remora/1.8.2
+   c3d/1.1.0              git-annex/6.20171109        xft/2.3.2
+[...]
+

  1. If a module is not listed here, it might belong to a category module that is not currently loaded. You can search for unlisted software using the module spider command. 

  2. The math and devel category modules will not be unloaded with module purge as they are "sticky". If a user wants to unload a sticky module, they must specify the --force option. 

\ No newline at end of file diff --git a/docs/software/overview/index.html b/docs/software/overview/index.html new file mode 100644 index 000000000..e0c38c74d --- /dev/null +++ b/docs/software/overview/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/software/updates.xml b/docs/software/updates.xml new file mode 100644 index 000000000..18965351d --- /dev/null +++ b/docs/software/updates.xml @@ -0,0 +1,467 @@ + + + +Sherlock software updates +Sherlock software update feed +https://www.sherlock.stanford.edu/docs/software/list + + + New module: system/webkitgtk version 2.28.4 + WebKitGTK is a full-featured port of the WebKit rendering engine, suitable for projects requiring any kind of web integration, from hybrid HTML/CSS applications to full-fledged web browsers. + https://webkitgtk.org/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2.28.4#webkitgtk + system, libs + kilian@stanford.edu (Kilian Cavalotti) + Fri, 17 May 2024 13:47:26 -0700 + + + New module: system/unifdef version 2.12 + The unifdef utility selectively processes conditional C preprocessor #if and #ifdef directives. + https://dotat.at/prog/unifdef/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2.12#unifdef + system, tool + kilian@stanford.edu (Kilian Cavalotti) + Fri, 17 May 2024 13:47:16 -0700 + + + New module: biology/cellranger-atac version 2.1.0 + Cell Ranger ATAC is a set of analysis pipelines that process Chromium Single Cell ATAC data. + https://support.10xgenomics.com/single-cell-atac/software/pipelines/latest/what-is-cell-ranger-atac + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2.1.0#cellranger-atac + biology, genomics + kilian@stanford.edu (Kilian Cavalotti) + Fri, 17 May 2024 10:07:43 -0700 + + + New version: math/py-tensorrt version 10.0.1_py312 + Python bindings for the TensorRT library. + https://developer.nvidia.com/tensorrt + https://www.sherlock.stanford.edu/docs/software/list/?add:v=10.0.1_py312#py-tensorrt + math, deep learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 15 May 2024 14:59:26 -0700 + + + New version: math/tensorrt version 10.0.1.6 + NVIDIA TensorRT™ is a high-performance deep learning inference optimizer and runtime that delivers low latency, high-throughput inference for deep learning applications. + https://developer.nvidia.com/tensorrt + https://www.sherlock.stanford.edu/docs/software/list/?add:v=10.0.1.6#tensorrt + math, deep learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 15 May 2024 14:59:13 -0700 + + + New module: physics/py-gdal-utils version 3.4.1_py39 + gdal-utils is the GDAL Python Utilities distribution. + https://pypi.org/project/gdal-utils/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=3.4.1_py39#py-gdal-utils + physics, geoscience + kilian@stanford.edu (Kilian Cavalotti) + Tue, 14 May 2024 11:36:58 -0700 + + + New module: system/py-nvitop version 1.3.2_py312 + An interactive NVIDIA-GPU process viewer and beyond. + https://github.com/XuehaiPan/nvitop + https://www.sherlock.stanford.edu/docs/software/list/?add:v=1.3.2_py312#py-nvitop + system, resource monitoring + kilian@stanford.edu (Kilian Cavalotti) + Tue, 7 May 2024 14:09:40 -0700 + + + New module: system/py-nvitop version 1.3.2_py39 + An interactive NVIDIA-GPU process viewer and beyond. 
+ https://github.com/XuehaiPan/nvitop + https://www.sherlock.stanford.edu/docs/software/list/?add:v=1.3.2_py39#py-nvitop + system, resource monitoring + kilian@stanford.edu (Kilian Cavalotti) + Tue, 7 May 2024 14:09:40 -0700 + + + New module: system/py-nvitop version default + An interactive NVIDIA-GPU process viewer and beyond. + https://github.com/XuehaiPan/nvitop + https://www.sherlock.stanford.edu/docs/software/list/?add:v=default#py-nvitop + system, resource monitoring + kilian@stanford.edu (Kilian Cavalotti) + Tue, 7 May 2024 14:09:40 -0700 + + + New module: system/py-nvidia-ml-py version 12.550.52_py312 + Python bindings to the NVIDIA Management Library. + https://pypi.org/project/nvidia-ml-py/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=12.550.52_py312#py-nvidia-ml-py + system, tools + kilian@stanford.edu (Kilian Cavalotti) + Tue, 7 May 2024 14:09:12 -0700 + + + New module: system/py-nvidia-ml-py version 12.550.52_py39 + Python bindings to the NVIDIA Management Library. + https://pypi.org/project/nvidia-ml-py/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=12.550.52_py39#py-nvidia-ml-py + system, tools + kilian@stanford.edu (Kilian Cavalotti) + Tue, 7 May 2024 14:09:12 -0700 + + + New module: math/py-kaolin version 0.15.0_py39 + A PyTorch Library for Accelerating 3D Deep Learning Research. + https://github.com/NVIDIAGameWorks/kaolin + https://www.sherlock.stanford.edu/docs/software/list/?add:v=0.15.0_py39#py-kaolin + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Thu, 2 May 2024 17:07:17 -0700 + + + New version: math/matlab version R2024a + MATLAB is a multi-paradigm numerical computing environment and proprietary programming language developed by MathWorks. + https://www.mathworks.com/products/matlab.html + https://www.sherlock.stanford.edu/docs/software/list/?add:v=R2024a#matlab + math, numerical analysis + kilian@stanford.edu (Kilian Cavalotti) + Thu, 2 May 2024 09:35:24 -0700 + + + New version: biology/fsl version 6.0.7.10 + FSL is a comprehensive library of analysis tools for FMRI, MRI and DTI brain imaging data. + https://fsl.fmrib.ox.ac.uk/fsl + https://www.sherlock.stanford.edu/docs/software/list/?add:v=6.0.7.10#fsl + biology, neurology + kilian@stanford.edu (Kilian Cavalotti) + Fri, 26 Apr 2024 19:45:58 -0700 + + + New version: chemistry/schrodinger version 2024-1 + Schrödinger Suites (Small-molecule Drug Discovery Suite, Material Science Suite, Biologics Suite) provide a set of molecular modelling software. + https://www.schrodinger.com + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2024-1#schrodinger + chemistry, computational chemistry + kilian@stanford.edu (Kilian Cavalotti) + Thu, 25 Apr 2024 15:40:16 -0700 + + + New module: math/py-accelerate version 0.29.3_py312 + Huggingface Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration. + https://github.com/huggingface/accelerate + https://www.sherlock.stanford.edu/docs/software/list/?add:v=0.29.3_py312#py-accelerate + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Tue, 23 Apr 2024 12:04:35 -0700 + + + New version: math/py-pywavelets version 1.6.0_py39 + PyWavelets is a free Open Source library for wavelet transforms in Python. 
+ https://pywavelets.readthedocs.io/en/latest/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=1.6.0_py39#py-pywavelets + math, numerical library + kilian@stanford.edu (Kilian Cavalotti) + Fri, 19 Apr 2024 18:28:41 -0700 + + + New module: math/py-pywavelets version 1.6.0_py312 + PyWavelets is a free Open Source library for wavelet transforms in Python. + https://pywavelets.readthedocs.io/en/latest/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=1.6.0_py312#py-pywavelets + math, numerical library + kilian@stanford.edu (Kilian Cavalotti) + Fri, 19 Apr 2024 17:14:23 -0700 + + + New module: math/py-torchtune version 0.1.1_py312 + torchtune is a PyTorch-native library for easily authoring, fine-tuning and experimenting with LLMs. + https://github.com/pytorch/torchtune + https://www.sherlock.stanford.edu/docs/software/list/?add:v=0.1.1_py312#py-torchtune + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Fri, 19 Apr 2024 11:51:46 -0700 + + + New version: biology/rosetta version 3.14 + Rosetta is the premier software suite for modeling macromolecular structures. As a flexible, multi-purpose application, it includes tools for structure prediction, design, and remodeling of proteins and nucleic acids. + https://www.rosettacommons.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=3.14#rosetta + biology, computational biology + kilian@stanford.edu (Kilian Cavalotti) + Thu, 18 Apr 2024 19:53:52 -0700 + + + New version: devel/py-scons version 4.7.0_py312 + SCons is an Open Source software construction tool. + http://www.scons.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=4.7.0_py312#py-scons + devel, build + kilian@stanford.edu (Kilian Cavalotti) + Thu, 18 Apr 2024 18:05:13 -0700 + + + New version: chemistry/lammps version 20230802 + LAMMPS is a classical molecular dynamics code that models an ensemble of particles in a liquid, solid, or gaseous state. + http://lammps.sandia.gov + https://www.sherlock.stanford.edu/docs/software/list/?add:v=20230802#lammps + chemistry, molecular dynamics + kilian@stanford.edu (Kilian Cavalotti) + Thu, 18 Apr 2024 16:37:11 -0700 + + + New module: devel/py-jupytext version 1.16.1_py39 + Jupyter Notebooks as Markdown Documents, Julia, Python or R scripts. + https://github.com/mwouts/jupytext + https://www.sherlock.stanford.edu/docs/software/list/?add:v=1.16.1_py39#py-jupytext + devel, IDE + kilian@stanford.edu (Kilian Cavalotti) + Wed, 10 Apr 2024 14:15:19 -0700 + + + New version: biology/py-macs2 version 2.2.9.1_py39 + MACS (Model-based Analysis of ChIP-Seq) implements a novel ChIP-Seq analysis method. + https://github.com/taoliu/MACS + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2.2.9.1_py39#py-macs2 + biology, genomics + kilian@stanford.edu (Kilian Cavalotti) + Fri, 29 Mar 2024 15:38:30 -0700 + + + New module: math/py-tinygrad version 0.8.0_py312 + tinygrad is a deep learning framework that aims to provide a balance between simplicity and functionality. + https://github.com/tinygrad/tinygrad + https://www.sherlock.stanford.edu/docs/software/list/?add:v=0.8.0_py312#py-tinygrad + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Fri, 29 Mar 2024 08:51:38 -0700 + + + New version: devel/py-h5py version 3.10.0_py312 + The h5py package is a Pythonic interface to the HDF5 binary data format. 
+ http://www.h5py.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=3.10.0_py312#py-h5py + devel, lib + kilian@stanford.edu (Kilian Cavalotti) + Thu, 28 Mar 2024 18:13:23 -0700 + + + New version: devel/py-mpi4py version 3.1.5_py312 + MPI for Python provides Python bindings for the Message Passing Interface (MPI) standard. It is implemented on top of the MPI-1/2/3 specification and exposes an API which grounds on the standard MPI-2 C++ bindings. + https://bitbucket.org/mpi4py/mpi4py + https://www.sherlock.stanford.edu/docs/software/list/?add:v=3.1.5_py312#py-mpi4py + devel, mpi + kilian@stanford.edu (Kilian Cavalotti) + Thu, 28 Mar 2024 18:02:36 -0700 + + + New module: math/py-torch-nvidia-apex version 23.08_py312 + A PyTorch Extension + https://github.com/NVIDIA/apex + https://www.sherlock.stanford.edu/docs/software/list/?add:v=23.08_py312#py-torch-nvidia-apex + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Thu, 28 Mar 2024 17:43:56 -0700 + + + New version: devel/nccl version 2.20.5 + NCCL (pronounced 'Nickel') is a stand-alone library of standard collective communication routines, such as all-gather, reduce, broadcast, etc., that have been optimized to achieve high bandwidth over PCIe. + https://github.com/NVIDIA/nccl + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2.20.5#nccl + devel, lib + kilian@stanford.edu (Kilian Cavalotti) + Thu, 28 Mar 2024 11:55:24 -0700 + + + New module: math/py-datasets version 2.18.0_py312 + Hugging Face Datasets is a library for easily accessing and sharing datasets for Audio, Computer Vision, and Natural Language Processing (NLP) tasks. + https://huggingface.co/docs/datasets + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2.18.0_py312#py-datasets + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 19:10:33 -0700 + + + New version: math/py-torchvision version 0.17.1_py312 + Datasets, model architectures, and common image transformations for computer vision for PyTorch. + http://pytorch.org/vision + https://www.sherlock.stanford.edu/docs/software/list/?add:v=0.17.1_py312#py-torchvision + math, deep learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 18:57:14 -0700 + + + New version: devel/py-pandas version 2.2.1_py312 + pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language. + https://pandas.pydata.org/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2.2.1_py312#py-pandas + devel, data + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 18:33:04 -0700 + + + New module: math/py-transformers version 4.39.1_py312 + Hugging Face Transformers provides APIs and tools to easily download and train state-of-the-art pretrained models. 
+ https://huggingface.co/docs/transformers + https://www.sherlock.stanford.edu/docs/software/list/?add:v=4.39.1_py312#py-transformers + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 18:25:34 -0700 + + + New module: math/py-tokenizers version 0.15.2_py312 + Hugging Face Tokenizers provides an implementation of today’s most used tokenizers, with a focus on performance and versatility.T + https://huggingface.co/docs/tokenizers + https://www.sherlock.stanford.edu/docs/software/list/?add:v=0.15.2_py312#py-tokenizers + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 17:50:44 -0700 + + + New module: math/py-safetensors version 0.4.2_py312 + Simple, safe way to store and distribute tensors. + https://huggingface.co/docs/safetensors + https://www.sherlock.stanford.edu/docs/software/list/?add:v=0.4.2_py312#py-safetensors + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 17:37:36 -0700 + + + New module: math/py-huggingface-hub version 0.22.1_py312 + The huggingface_hub library allows you to interact with the Hugging Face Hub, a machine learning platform for creators and collaborators. + https://huggingface.co/docs/huggingface_hub + https://www.sherlock.stanford.edu/docs/software/list/?add:v=0.22.1_py312#py-huggingface-hub + math, machine learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 17:25:11 -0700 + + + New version: devel/py-ipython version 8.22.2_py312 + IPython is a command shell for interactive computing in multiple programming languages, originally developed for the Python programming language. + https://ipython.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=8.22.2_py312#py-ipython + devel, language + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 16:46:41 -0700 + + + New version: viz/py-matplotlib version 3.8.3_py312 + Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. + http://matplotlib.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=3.8.3_py312#py-matplotlib + viz, plotting + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 11:49:39 -0700 + + + New version: viz/py-pillow-simd version 10.2.0_py312 + Pillow-SIMD is an optimized version of Pillow + https://github.com/uploadcare/pillow-simd + https://www.sherlock.stanford.edu/docs/software/list/?add:v=10.2.0_py312#py-pillow-simd + viz, imaging + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 11:48:55 -0700 + + + New version: viz/py-pillow version 10.2.0_py312 + Pillow is a friendly PIL (Python Imaging Library) fork. + https://python-pillow.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=10.2.0_py312#py-pillow + viz, imaging + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 11:48:55 -0700 + + + New version: math/py-pytorch version 2.2.1_py312 + PyTorch is a deep learning framework that puts Python first. + http://pytorch.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=2.2.1_py312#py-pytorch + math, deep learning + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 11:11:15 -0700 + + + New version: math/py-scipy version 1.12.0_py312 + The SciPy library provides many user-friendly and efficient numerical routines such as routines for numerical integration and optimization. 
+ https://www.scipy.org/scipylib + https://www.sherlock.stanford.edu/docs/software/list/?add:v=1.12.0_py312#py-scipy + math, numerical library + kilian@stanford.edu (Kilian Cavalotti) + Wed, 27 Mar 2024 11:09:56 -0700 + + + New version: devel/openmpi version 4.1.6 + The Open MPI Project is an open source Message Passing Interface implementation that is developed and maintained by a consortium of academic, research, and industry partners. + https://www.openmpi.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=4.1.6#openmpi + devel, mpi + kilian@stanford.edu (Kilian Cavalotti) + Tue, 26 Mar 2024 16:25:25 -0700 + + + New version: devel/ucx version 1.15.0 + UCX is a communication library implementing high-performance messaging for MPI/PGAS frameworks. + http://www.openucx.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=1.15.0#ucx + devel, networking + kilian@stanford.edu (Kilian Cavalotti) + Tue, 26 Mar 2024 16:07:22 -0700 + + + New version: math/opencv version 4.9.0 + OpenCV (Open Source Computer Vision Library) is an open source computer vision and machine learning software library. + https://opencv.org + https://www.sherlock.stanford.edu/docs/software/list/?add:v=4.9.0#opencv + math, lib + kilian@stanford.edu (Kilian Cavalotti) + Fri, 22 Mar 2024 18:43:21 -0700 + + + New version: math/cudnn version 9.0.0.312 + NVIDIA cuDNN is a GPU-accelerated library of primitives for deep neural networks. + https://developer.nvidia.com/cudnn + https://www.sherlock.stanford.edu/docs/software/list/?add:v=9.0.0.312#cudnn + math, deep learning + kilian@stanford.edu (Kilian Cavalotti) + Fri, 22 Mar 2024 17:21:35 -0700 + + + New version: biology/salmon version 1.10.0 + Highly-accurate & wicked fast transcript-level quantification from RNA-seq reads using lightweight alignments. + https://combine-lab.github.io/salmon + https://www.sherlock.stanford.edu/docs/software/list/?add:v=1.10.0#salmon + biology, genomics + kilian@stanford.edu (Kilian Cavalotti) + Wed, 6 Mar 2024 14:17:16 -0800 + + + New version: devel/cuda version 12.4.0 + CUDA is a parallel computing platform and application programming interface (API) model created by Nvidia. It allows software developers and software engineers to use a CUDA-enabled graphics processing unit (GPU) for general purpose processing. + https://developer.nvidia.com/cuda-toolkit + https://www.sherlock.stanford.edu/docs/software/list/?add:v=12.4.0#cuda + devel, language + kilian@stanford.edu (Kilian Cavalotti) + Tue, 5 Mar 2024 14:39:10 -0800 + + + New version: viz/py-plotly version 5.19.0_py312 + Plotly's Python graphing library makes interactive, publication-quality graphs online. + https://plot.ly/python/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=5.19.0_py312#py-plotly + viz, plotting + kilian@stanford.edu (Kilian Cavalotti) + Wed, 21 Feb 2024 10:02:42 -0800 + + + New version: viz/py-plotly version 5.19.0_py39 + Plotly's Python graphing library makes interactive, publication-quality graphs online. 
+ https://plot.ly/python/ + https://www.sherlock.stanford.edu/docs/software/list/?add:v=5.19.0_py39#py-plotly + viz, plotting + kilian@stanford.edu (Kilian Cavalotti) + Wed, 21 Feb 2024 10:02:42 -0800 + + + Removed version: system/google-cloud-sdk version 338.0.0 + google-cloud-sdk/338.0.0 + https://www.sherlock.stanford.edu/docs/software/list/?del:v=338.0.0#google-cloud-sdk + system + kilian@stanford.edu (Kilian Cavalotti) + Tue, 7 May 2024 17:31:23 -0700 + + + diff --git a/docs/software/using/R/index.html b/docs/software/using/R/index.html new file mode 100644 index 000000000..cc0a1851a --- /dev/null +++ b/docs/software/using/R/index.html @@ -0,0 +1,310 @@ + R - Sherlock

R

Introduction#

R is a programming language and software environment for statistical computing and graphics. It is similar to the S language and environment developed at Bell Laboratories. R provides a wide variety of statistical and graphical techniques and is highly extensible.

More documentation#

The following documentation is specifically intended for using R on Sherlock. For more complete documentation about R in general, please see the R documentation.

R on Sherlock#

R is available on Sherlock and the corresponding module can be loaded with:

$ ml R
+

For a list of available versions, you can execute ml spider R at the Sherlock prompt, or refer to the Software list page.

Using R#

Once your environment is configured (i.e. when the R module is loaded), R can be started by simply typing R at the shell prompt:

$ R
+
+R version 3.5.1 (2018-07-02) -- "Feather Spray"
+Copyright (C) 2018 The R Foundation for Statistical Computing
+Platform: x86_64-pc-linux-gnu (64-bit)
+[...]
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+>
+

For a listing of command line options:

$ R --help
+

Running an R script#

There are several ways to launch an R script on the command line, which differ in how the script's output is presented:

Method Output
Rscript script.R displayed on screen, on stdout
R CMD BATCH script.R redirected to a script.Rout file
R --no-save < script.R displayed on screen, on stdout
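For instance, with a trivial one-line script (hello.R is a made-up example):

$ cat hello.R
cat("Hello from R\n")
$ Rscript hello.R
Hello from R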

Submitting an R job#

Here's an example R batch script that can be submitted via sbatch. It runs a simple matrix multiplication example, and demonstrates how to feed R code as a HEREDOC to R directly, so no intermediate R script is necessary:

#!/usr/bin/bash
+#SBATCH --time=00:10:00
+#SBATCH --mem=10G
+#SBATCH --output=Rtest.log
+
+# load the module
+ml R
+
+# run R code
+R --no-save << EOF
+set.seed (1)
+m <- 4000
+n <- 4000
+A <- matrix (runif (m*n),m,n)
+system.time (B <- crossprod(A))
+EOF
+

You can save this script as Rtest.sbatch and submit it to the scheduler with:

$ sbatch Rtest.sbatch
+

Once the job is done, you should get an Rtest.log file in the current directory, with the following contents:

R version 3.5.1 (2018-07-02) -- "Feather Spray"
+[...]
+> set.seed (1)
+> m <- 4000
+> n <- 4000
+> A <- matrix (runif (m*n),m,n)
+> system.time (B <- crossprod(A))
+   user  system elapsed
+  2.649   0.077   2.726
+

R packages#

R comes with a single package library in $R_HOME/library, which contains the standard and most common packages. This is usually in a system location and is not writable by end-users.

To accommodate individual users' requirements, R provides a way for each user to install packages in the location of their choice. The default location where users can install their own R packages is $HOME/R/x86_64-pc-linux-gnu-library/<R_version>, where <R_version> depends on the R version in use. For instance, if you have the R/3.5.1 module loaded, the default R user library path will be $HOME/R/x86_64-pc-linux-gnu-library/3.5.

This directory doesn't exist by default. The first time you install a package, R will ask whether you want to use the default location, and will create the directory for you.
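
If you're not sure which path a given R version will use, you can ask R itself. This is just a quick check, and the output below is what R 3.5 would typically report:

$ ml R/3.5.1
+$ Rscript -e 'Sys.getenv("R_LIBS_USER")'
+[1] "~/R/x86_64-pc-linux-gnu-library/3.5"
+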

Installing packages#

Install R packages in a standard shell session

Make sure to install your packages in a standard Sherlock shell session, not in an RStudio session.

To install an R package in your personal environment, the first thing to do is load the R module:

$ ml R
+

Then start an R session, and use the install.packages() function at the R prompt. For instance, the following example will install the doParallel package, using the US mirror of the CRAN repository:

$ R
+
+R version 3.5.1 (2018-07-02) -- "Feather Spray"
+[...]
+
+> install.packages('doParallel', repos='http://cran.us.r-project.org')
+

It should give the following warning:

Warning in install.packages("doParallel", repos = "http://cran.us.r-project.org") :
+  'lib = "/share/software/user/open/R/3.5.1/lib64/R/library"' is not writable
+Would you like to use a personal library instead? (yes/No/cancel)
+Would you like to create a personal library
+‘~/R/x86_64-pc-linux-gnu-library/3.5’
+to install packages into? (yes/No/cancel) y
+

Answering y twice will make R create a ~/R/x86_64-pc-linux-gnu-library/3.5 directory and instruct it to install future R packages there.

The installation will then proceed:

trying URL 'http://cran.us.r-project.org/src/contrib/doParallel_1.0.14.tar.gz'
+Content type 'application/x-gzip' length 173607 bytes (169 KB)
+==================================================
+downloaded 169 KB
+
+* installing *source* package ‘doParallel’ ...
+** package ‘doParallel’ successfully unpacked and MD5 sums checked
+** R
+** demo
+** inst
+** byte-compile and prepare package for lazy loading
+** help
+*** installing help indices
+** building package indices
+** installing vignettes
+** testing if installed package can be loaded
+* DONE (doParallel)
+
+The downloaded source packages are in
+        ‘/tmp/Rtmp0RHrMZ/downloaded_packages’
+>
+

and when it's done, you should be able to load the package within R with:

> library(doParallel)
+Loading required package: foreach
+Loading required package: iterators
+Loading required package: parallel
+>
+
Installing large packages#

Installing large R packages can sometimes be very time consuming. To speed things up, R can utilize multiple CPUs in parallel when the Ncpus=n option is added to the install.packages() command (where n is the number of CPUs you'd like to use).

For instance, you can get an interactive session with 4 CPU cores with sh_dev:

$ sh_dev -c 4
+$ ml R
+$ R
+> install.packages("dplyr", repos = "http://cran.us.r-project.org", Ncpus=4)
+
Alternative installation path#

To install R packages in a different location, you'll need to create that directory, and instruct R to install the packages there:

$ mkdir ~/R_libs/
+$ R
+> install.packages('doParallel', repos='http://cran.us.r-project.org', lib="~/R_libs")
+

The installation will proceed normally and the doParallel package will be installed in $HOME/R_libs/.

Specifying the full destination path for each package installation could quickly become tiresome, so to avoid this, you can create a .Renviron file in your $HOME directory, and define your R_libs path there:

$ cat << EOF > $HOME/.Renviron
+R_LIBS=~/R_libs
+EOF
+

With this, whenever R is started, the $HOME/R_libs/ directory will be added to the list of places R will look for packages, and you won't need to specify this installation path when using install.packages() anymore.

Where does R look for packages?

To see the directories where R searches for packages and libraries, you can use the following command in R:

> .libPaths()
+

Sharing R packages

If you'd like to share R packages within your group, you can simply define $R_LIBS to point to a shared directory, such as $GROUP_HOME/R_libs, and have each user in the group define it in their own environment, as sketched below.
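
For instance, each user in the group could run something like the following, assuming the shared directory is $GROUP_HOME/R_libs (adjust the path to your group's actual choice). As with the example above, the HEREDOC syntax will expand $GROUP_HOME to its full path when the file is written:

$ mkdir -p $GROUP_HOME/R_libs
+$ cat << EOF > $HOME/.Renviron
+R_LIBS=$GROUP_HOME/R_libs
+EOF
+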

Setting the installation repository#

When installing a package, R needs to know from which repository the package should be downloaded. If it's not specified, it will prompt for it and display a list of available CRAN mirrors.

To avoid setting the CRAN mirror each time you run install.packages you can permanently set the mirror by creating a .Rprofile file in your $HOME directory, which R will execute each time it starts.

For instance, adding the following contents to your ~/.Rprofile will make sure that every install.packages() invocation will use the closest CRAN mirror:

## local creates a new, empty environment
+## This avoids polluting the global environment with
+## the object r
+local({
+  r = getOption("repos")
+  r["CRAN"] = "https://cloud.r-project.org/"
+  options(repos = r)
+})
+

Once this is set, you only need to specify the name of the package to install, and R will use the mirror you defined automatically:

> install.packages("doParallel")
+[...]
+trying URL 'https://cloud.r-project.org/src/contrib/doParallel_1.0.14.tar.gz'
+Content type 'application/x-gzip' length 173607 bytes (169 KB)
+==================================================
+downloaded 169 KB
+
Installing packages from GitHub#

R packages can be directly installed from GitHub using the devtools package. devtools needs to be installed first, with:

> install.packages("devtools")
+

You can then install an R package directly from its GitHub repository. For instance, to install dplyr from tidyverse/dplyr:

> library(devtools)
+> install_github("tidyverse/dplyr")
+

Package dependencies#

Sometimes when installing R packages, other software is needed for the installation and/or compilation. For instance, when trying to install the sf package, you may encounter the following error messages:

> install.packages("sf")
+[...]
+Configuration failed because libudunits2.so was not found. Try installing:...
+[...]
+configure: error: gdal-config not found or not executable.
+

This is because sf needs a few dependencies, like udunits and gdal in order to compile and install successfully. Fortunately those dependencies are already available as modules on Sherlock.

Whenever you see "not found" errors, you may want to try searching the modules inventory with module spider:

$ module spider udunits
+
+----------------------------------------------------------------------------
+  udunits: udunits/2.2.26
+----------------------------------------------------------------------------
+    Description:
+      The UDUNITS package from Unidata is a C-based package for the
+      programmatic handling of units of physical quantities.
+
+
+    You will need to load all module(s) on any one of the lines below before
+    the "udunits/2.2.26" module is available to load.
+
+      physics
+

So for sf, in order to load the dependencies, exit R, load the required modules (physics, udunits, gdal and geos), and try installing sf again:

$ ml load physics udunits gdal geos
+$ ml R/4.3.2
+$ R
+> install.packages("sf")
+

Getting dependencies right can be a matter of trial and error: you may have to load R, install packages, search modules, load modules, and install packages again, and so forth. Fortunately, R packages only need to be installed once, and many R package dependencies are already available as modules on Sherlock; you just need to search for them with module spider and load them.

And in case you're stuck, you can of course always send us an email and we'll be happy to assist.

Updating Packages#

To upgrade R packages, you can use the update.packages() function within an R session.

For instance, to update the doParallel package:

> update.packages('doParallel')
+

When the package name is omitted, update.packages() will try to update all the packages that are installed, which is the most efficient way to ensure that all the packages in your local R library are up to date.
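
For instance, to try updating everything in your local library in one go (the ask = FALSE argument is optional, and simply skips the per-package confirmation prompts; see also the caveat below about centrally installed packages):

> update.packages(ask = FALSE)
+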

Centrally installed packages cannot be updated

Note that attempting to update centrally installed packages will fail. You will have to use install.packages() to install your own version of the packages in your $HOME directory instead.

Removing packages#

To remove a package from your local R library, you can use the remove.packages() function. For instance:

> remove.packages('doParallel')
+

Examples#

Installing devtools#

devtools is a package that provides R functions that simplify many common tasks. While its core functionality revolves around package development, devtools can also be used to install packages, particularly those on GitHub.

Installing devtools is somewhat memory-intensive and has several dependencies. The following example shows how to run an interactive session with 4 CPUs, load the modules for the necessary dependencies, and install devtools for R version 4.2.0.

# Launch interactive dev session with 4 CPUs
+
+$ sh_dev -c 4
+
+# Load the required modules
+
+$ ml purge
+$ ml R/4.2.0
+$ ml system harfbuzz fribidi
+$ ml cmake libgit2
+$ ml openssl
+
+# Launch R and install devtools
+
+$ R
+> install.packages("devtools", repos = "http://cran.us.r-project.org", Ncpus=4)
+

Single node#

R has a couple of powerful and easy-to-use tools to parallelize your R jobs. doParallel is one of them. If the doParallel package is not installed in your environment yet, you can install it in a few easy steps.

Here is a quick doParallel example that uses one node and 16 cores on Sherlock (more nodes or CPU cores can be requested, as needed).

Save the two scripts below in a directory on Sherlock:

# Example doParallel script
+
+if(!require(doParallel)) install.packages("doParallel")
+library(doParallel)
+
+# use the environment variable SLURM_NTASKS_PER_NODE to set
+# the number of cores to use
+registerDoParallel(cores=(Sys.getenv("SLURM_NTASKS_PER_NODE")))
+
+# bootstrap iteration example
+x <- iris[which(iris[,5] != "setosa"), c(1,5)]
+iterations <- 10000  # number of iterations to run
+
+# parallel loop
+# note the '%dopar%' instruction
+parallel_time <- system.time({
+  r <- foreach(icount(iterations), .combine=cbind) %dopar% {
+    ind <- sample(100, 100, replace=TRUE)
+    result1 <- glm(x[ind,2]~x[ind,1], family=binomial(logit))
+    coefficients(result1)
+  }
+})[3]
+
+# show the number of parallel workers to be used
+getDoParWorkers()
+
+# display the elapsed time of the parallel loop
+parallel_time
+
#!/bin/bash
+
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=16
+#SBATCH --output=doParallel_test.log
+
+# --ntasks-per-node will be used in doParallel_test.R to specify the number
+# of cores to use on the machine.
+
+# load modules
+ml R/3.5.1
+
+# execute script
+Rscript doParallel_test.R
+

And then submit the job with:

$ sbatch doParallel_test.sbatch
+

Once the job has completed, the output file should contain something like this:

$ cat doParallel_test.log
+[1] "16"
+elapsed
+  3.551
+

Bonus points: observe the scalability of the doParallel loop by submitting the same script using a varying number of CPU cores:

$ for i in 2 4 8 16; do
+    sbatch --out=doP_${i}.out --ntasks-per-node=$i doParallel_test.sbatch
+done
+

When the jobs are done:

$ for i in 2 4 8 16; do
+    printf "%2i cores: %4.1fs\n" $i $(tail -n1 doP_$i.out)
+done
+ 2 cores: 13.6s
+ 4 cores:  7.8s
+ 8 cores:  4.9s
+16 cores:  3.6s
+

Multiple nodes#

To distribute parallel R tasks on multiple nodes, you can use the Rmpi package, which provides MPI bindings for R.

To install the Rmpi package, a module providing an MPI library must first be loaded. For instance:

$ ml openmpi R
+$ R
+> install.packages("Rmpi")
+

Once the package is installed, the following scripts demonstrate a very basic Rmpi example.

# Example Rmpi script
+
+if (!require("Rmpi")) install.packages("Rmpi")
+library(Rmpi)
+
+# initialize an Rmpi environment
+ns <- mpi.universe.size() - 1
+mpi.spawn.Rslaves(nslaves=ns, needlog=TRUE)
+
+# send these commands to the slaves
+mpi.bcast.cmd( id <- mpi.comm.rank() )
+mpi.bcast.cmd( ns <- mpi.comm.size() )
+mpi.bcast.cmd( host <- mpi.get.processor.name() )
+
+# all slaves execute this command
+mpi.remote.exec(paste("I am", id, "of", ns, "running on", host))
+
+# close down the Rmpi environment
+mpi.close.Rslaves(dellog = FALSE)
+mpi.exit()
+
#!/bin/bash
+
+#SBATCH --nodes=2
+#SBATCH --ntasks=4
+#SBATCH --output=Rmpi-test.log
+
+## load modules
+# openmpi is not loaded by default with R, so it must be loaded explicitly
+ml R openmpi
+
+## run script
+# we use '-np 1' since Rmpi does its own task management
+mpirun -np 1 Rscript Rmpi-test.R
+

You can save those scripts as Rmpi-test.R and Rmpi-test.sbatch and then submit your job with:

$ sbatch Rmpi-test.sbatch
+

When the job is done, its output should look like this:

$ cat Rmpi-test.log
+        3 slaves are spawned successfully. 0 failed.
+master (rank 0, comm 1) of size 4 is running on: sh-06-33
+slave1 (rank 1, comm 1) of size 4 is running on: sh-06-33
+slave2 (rank 2, comm 1) of size 4 is running on: sh-06-33
+slave3 (rank 3, comm 1) of size 4 is running on: sh-06-34
+$slave1
+[1] "I am 1 of 4 running on sh-06-33"
+
+$slave2
+[1] "I am 2 of 4 running on sh-06-33"
+
+$slave3
+[1] "I am 3 of 4 running on sh-06-34"
+
+[1] 1
+[1] "Detaching Rmpi. Rmpi cannot be used unless relaunching R."
+

GPUs#

Here's a quick example that compares running a matrix multiplication on a CPU and on a GPU using R. It requires submitting a job to a GPU node, and uses the gpuR R package.

# Example gpuR script
+
+if (!require("gpuR")) install.packages("gpuR")
+library(gpuR)
+
+print("CPU times")
+for(i in seq(1:7)) {
+    ORDER = 64*(2^i)
+    A = matrix(rnorm(ORDER^2), nrow=ORDER)
+    B = matrix(rnorm(ORDER^2), nrow=ORDER)
+    print(paste(i, sprintf("%5.2f", system.time({C = A %*% B})[3])))
+}
+
+print("GPU times")
+for(i in seq(1:7)) {
+    ORDER = 64*(2^i)
+    A = matrix(rnorm(ORDER^2), nrow=ORDER)
+    B = matrix(rnorm(ORDER^2), nrow=ORDER)
+    gpuA = gpuMatrix(A, type="double")
+    gpuB = gpuMatrix(B, type="double")
+    print(paste(i, sprintf("%5.2f", system.time({gpuC = gpuA %*% gpuB})[3])))
+}
+
#!/bin/bash
+
+#SBATCH --partition gpu
+#SBATCH --mem 8GB
+#SBATCH --gres gpu:1
+#SBATCH --output=gpuR-test.log
+
+## load modules
+# cuda is not loaded by default with R, so it must be loaded explicitly
+ml R cuda
+
+Rscript gpuR-test.R
+

After submitting the job with sbatch gpuR-test.sbatch, the output file should contain something like this:

[1] "CPU times"
+[1] "1  0.00"
+[1] "2  0.00"
+[1] "3  0.02"
+[1] "4  0.13"
+[1] "5  0.97"
+[1] "6  7.56"
+[1] "7 60.47"
+
+[1] "GPU times"
+[1] "1  0.10"
+[1] "2  0.04"
+[1] "3  0.02"
+[1] "4  0.07"
+[1] "5  0.39"
+[1] "6  2.04"
+[1] "7 11.59"
+

which shows a decent speedup for running on a GPU for the largest matrix sizes.

\ No newline at end of file diff --git a/docs/software/using/anaconda/index.html b/docs/software/using/anaconda/index.html new file mode 100644 index 000000000..1b01c0618 --- /dev/null +++ b/docs/software/using/anaconda/index.html @@ -0,0 +1 @@ + Anaconda - Sherlock

Anaconda

Introduction#

Anaconda is a Python/R distribution that aims to simplify package management and deployment for scientific computing. Although it can have merits on individual computers, it's often counter-productive on shared HPC systems like Sherlock.

Avoid using Anaconda on Sherlock

We recommend NOT using Anaconda on Sherlock, and instead considering other options, like virtual environments or containers.

Why Anaconda should be avoided on Sherlock#

Anaconda is widely used in several scientific domains, like data science, AI/ML, or bioinformatics, and is often listed in software documentation as the recommended (if not the only) way to install applications.

It is a useful solution for simplifying the management of Python and scientific libraries on a personal computer. However, on highly-specialized HPC systems like Sherlock, management of these libraries and dependencies should be done by Stanford Research Computing staff, to ensure compatibility and optimal performance on the cluster hardware.

For instance:

  • Anaconda very often installs software (compilers, scientific libraries, etc.) which already exists on Sherlock as modules, and does so in a sub-optimal fashion, with versions and configurations that are not tuned for the cluster,
  • it installs binaries which are not optimized for the processor architectures on Sherlock,
  • it makes incorrect assumptions about the location of various system libraries,
  • Anaconda installs software in $HOME by default, where it writes large amounts of files. A single Anaconda installation can easily fill up your $HOME directory quota, and makes things difficult to manage,
  • Anaconda installations can't easily be relocated,
  • Anaconda modifies your $HOME/.bashrc file, which can easily cause conflicts and slow things down when you log in.

Worse, a Conda recipe can force the installation of R (even though it's already available on Sherlock). That installation won't perform nearly as well as the version we provide as a module (which uses optimized libraries), or may not work at all; jobs launched with it may crash and end up wasting both computing resources and your time.

Installation issues

If you absolutely need to install anaconda/miniconda, please note that because of the large number of files that the installer will try to open, this will likely fail on a login node. So make sure to run the installation on a compute node, for instance using the sh_dev command.

What to do instead#

Use a virtual environment#

Instead of using Anaconda for your project, even when the installation instructions of the software you want to install rely on it, you can use a virtual environment.

A virtual environment offers all the functionality you need to use Python on Sherlock. You can convert Anaconda instructions and use a virtual environment instead, by following these steps:

  1. list the dependencies (also called requirements) of the application you want to use:
    • check if there is a requirements.txt file in the Git repository or in the software sources,
    • or, check the install_requires variable in the setup.py file, which lists the requirements.
  2. find which dependencies are Python modules and which are libraries provided by Anaconda. For example, CUDA and CuDNN are libraries that Anaconda can install, but which should not be re-installed as they are already available as modules on Sherlock,
  3. remove from the list of dependencies everything which is not a Python module (e.g. cudatoolkit and cudnn),
  4. create a virtual environment to install your dependencies, as sketched below.
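
As an illustration, here's a minimal sketch of these steps for a hypothetical application that ships a requirements.txt file (the Python module version and environment location below are just example values):

$ ml python/3.9.0
+$ python3 -m venv $SCRATCH/my_env
+$ source $SCRATCH/my_env/bin/activate
+(my_env) $ pip install -r requirements.txt
+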

And that's it: your software should run, without Anaconda. If you have any issues, please don't hesitate to contact us.

Use a container#

In some situations, the complexity of a program's dependencies requires the use of a solution where you can control the entire software environment. In these situations, we recommend using a container.

Tip

Existing Docker images can easily be converted into Apptainer/Singularity images.
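
For instance, assuming an apptainer module is available on the system, pulling an image from Docker Hub could look like this (the module and image names are just examples):

$ ml system apptainer
+$ apptainer pull docker://ubuntu:22.04
+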

The only potential downside of using containers is their size and the associated storage usage. But if your research group plans on using several container images, it could be useful to collect them all in a single location (like $GROUP_HOME) to avoid duplication.

\ No newline at end of file diff --git a/docs/software/using/clustershell/index.html b/docs/software/using/clustershell/index.html new file mode 100644 index 000000000..afb5f0da6 --- /dev/null +++ b/docs/software/using/clustershell/index.html @@ -0,0 +1,87 @@ + ClusterShell - Sherlock

ClusterShell

Introduction#

ClusterShell is a command-line tool and library that helps run commands in parallel on multiple servers. It allows executing arbitrary commands across multiple hosts. On Sherlock, it provides an easy way to run commands on the nodes your jobs are running on, and to collect back information. The two most useful commands it provides are cluset, which can manipulate lists of node names, and clush, which can run commands on multiple nodes at once.

More documentation#

The following documentation is specifically intended for using ClusterShell on Sherlock. For more complete documentation about ClusterShell in general, please see the ClusterShell documentation.

The ClusterShell library can also be directly integrated in your Python scripts, to add a wide range of functionality. See the ClusterShell Python API documentation for reference.
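
As a small illustrative example, assuming the py-clustershell module is loaded (see below), the NodeSet class can be used to expand a folded node list from Python:

$ ml system py-clustershell
+$ python3 -c 'from ClusterShell.NodeSet import NodeSet; print(list(NodeSet("sh03-01n[01-03]")))'
+['sh03-01n01', 'sh03-01n02', 'sh03-01n03']
+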

ClusterShell on Sherlock#

ClusterShell is available on Sherlock and the corresponding module can be loaded with:

$ ml system py-clustershell
+

cluset#

The cluset command can be used to easily manipulate lists of node names, and to expand, fold, or count them:

$ cluset --expand sh03-01n[01-06]
+sh03-01n01 sh03-01n02 sh03-01n03 sh03-01n04 sh03-01n05 sh03-01n06
+
+$ cluset --count sh03-01n[01-06]
+6
+
+$ cluset --fold sh03-01n01 sh03-01n02 sh03-01n03 sh03-01n06
+sh03-01n[01-03,06]
+

clush#

The clush command uses the same node list syntax to allow running the same commands simultaneously on those nodes. clush uses SSH to connect to each of these nodes.

Warning

You can only SSH to nodes where your jobs are running, and as a consequence, clush will only work on those nodes.

For instance, to check the load on multiple compute nodes at once:

$ clush -w sh03-01n[01-03] cat /proc/loadavg
+sh03-01n01: 19.48 14.43 11.76 22/731 22897
+sh03-01n02: 13.20 13.29 13.64 14/831 1163
+sh03-01n03: 11.60 11.48 11.82 18/893 23945
+

Gathering identical output

Using the -b option will regroup similar output lines to make large outputs easier to read. By default, the output of each node will be presented separately.

For instance, without -b:

$ clush -w sh03-01n[01-03] echo ok
+sh03-01n02: ok
+sh03-01n03: ok
+sh03-01n01: ok
+

With -b:

$ clush -bw sh03-01n[01-03] echo ok
+---------------
+sh03-01n[01-03] (3)
+---------------
+ok
+

Slurm integration#

On Sherlock, ClusterShell is also tightly integrated with the job scheduler, and can directly provide information about a user's jobs and the nodes they're running on. You can use the following groups to get specific node lists:

group name short name action example
@user: @u: list nodes where user has jobs running cluset -f @user:$USER
@job: @j: list nodes where job is running cluset -f @job:123456
@nodestate: @node:,@n: list nodes in given state cluset -f @nodestate:idle
@partition: @part:,@p: list nodes in given partition cluset -f @partition:gpu

For instance, to get the list of nodes where job 123456 is running:

$ cluset -f @job:123456
+

Examples#

Job information#

For instance, if job 1988522 from user kilian is running on nodes sh02-01n[59-60], squeue would display this:

$ squeue -u kilian
+       JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+     1988522    normal interact   kilian  R       1:30      2 sh02-01n[59-60]
+     1988523    normal interact   kilian  R       1:28      2 sh02-01n[61-62]
+

With ClusterShell, you could get:

  • the list of node names where user kilian has jobs running:

    $ cluset -f @user:kilian
    +sh02-01n[59-62]
    +
  • the nodes where job 1988522 is running, in an expanded form:

    $ cluset -e @job:1988522
    +sh02-01n59 sh02-01n60
    +

Node states#

You can also use those bindings to get lists of nodes in a particular state, in a given partition. For instance, to list the nodes that are in "mixed" state in the dev partition, you can request the intersection between the @nodestate:mixed and @partition:dev node lists:

$ cluset -f @nodestate:mixed -i @partition:dev
+sh02-01n[57-58]
+

Local storage#

To get a list of files in $L_SCRATCH on all the nodes that are part of job 1988522:

$ clush -w @j:1988522 tree $L_SCRATCH
+sh02-01n59: /lscratch/kilian
+sh02-01n59: ├── 1988522
+sh02-01n59: │   └── foo
+sh02-01n59: │       └── bar
+sh02-01n59: └── 1993608
+sh02-01n59:
+sh02-01n59: 3 directories, 1 file
+sh02-01n60: /lscratch/kilian
+sh02-01n60: └── 1988522
+sh02-01n60:
+sh02-01n60: 1 directory, 0 files
+

Process tree#

To display your process tree across all the nodes your jobs are running on:

$ clush -w @u:$USER pstree -au $USER
+sh02-09n71: mpiBench
+sh02-09n71:   `-3*[{mpiBench}]
+sh02-09n71: mpiBench
+sh02-09n71:   `-3*[{mpiBench}]
+sh02-09n71: mpiBench
+sh02-09n71:   `-3*[{mpiBench}]
+sh02-09n71: mpiBench
+sh02-09n71:   `-3*[{mpiBench}]
+sh02-10n01: mpiBench
+sh02-10n01:   `-3*[{mpiBench}]
+sh02-10n01: mpiBench
+sh02-10n01:   `-3*[{mpiBench}]
+sh02-10n01: mpiBench
+sh02-10n01:   `-3*[{mpiBench}]
+sh02-10n01: mpiBench
+sh02-10n01:   `-3*[{mpiBench}]
+

CPU usage#

To get the CPU and memory usage of your processes in job 2003264:

$ clush -w @j:2003264 ps -u$USER -o%cpu,rss,cmd
+sh03-07n12: %CPU   RSS CMD
+sh03-07n12:  0.0  4780 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-07n12:  0.0  4784 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-07n12:  0.0  4784 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-07n12:  0.0  4780 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n06: %CPU   RSS CMD
+sh03-06n06:  0.0 59596 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n06:  0.0 59576 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n06:  0.0 59580 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n06:  0.0 59588 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n05: %CPU   RSS CMD
+sh03-06n05:  0.0  7360 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n05:  0.0  7328 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n05:  0.0  7344 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n05:  0.0  7340 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n11: %CPU   RSS CMD
+sh03-06n11: 17.0 59604 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n11: 17.0 59588 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n11: 17.0 59592 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+sh03-06n11: 17.0 59580 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000
+

GPU usage#

To show what's running on all the GPUs on the nodes associated with job 123456:

$ clush -bw @job:123456 nvidia-smi --format=csv --query-compute-apps=process_name,utilization.memory
+sh03-12n01: /share/software/user/open/python/3.6.1/bin/python3.6, 15832 MiB
+sh02-12n04: /share/software/user/open/python/3.6.1/bin/python3.6, 15943 MiB
+
\ No newline at end of file diff --git a/docs/software/using/julia/index.html b/docs/software/using/julia/index.html new file mode 100644 index 000000000..6fedee3db --- /dev/null +++ b/docs/software/using/julia/index.html @@ -0,0 +1,116 @@ + Julia - Sherlock

Julia

Introduction#

Julia is a high-level general-purpose dynamic programming language that was originally designed to address the needs of high-performance numerical analysis and computational science, without the typical need for separate compilation to be fast, while also being usable for client and server web use, low-level systems programming, or as a specification language. Julia aims to create an unprecedented combination of ease-of-use, power, and efficiency in a single language.

More documentation#

The following documentation is specifically intended for using Julia on Sherlock. For more complete documentation about Julia in general, please see the Julia documentation.

Julia on Sherlock#

Julia is available on Sherlock and the corresponding module can be loaded with:

$ ml julia
+

For a list of available versions, you can execute ml spider julia at the Sherlock prompt, or refer to the Software list page.

Using Julia#

Once your environment is configured (i.e. when the julia module is loaded), julia can be started by simply typing julia at the shell prompt:

$ julia
+
+_
+   _       _ _(_)_     |  Documentation: https://docs.julialang.org
+  (_)     | (_) (_)    |
+   _ _   _| |_  __ _   |  Type "?" for help, "]?" for Pkg help.
+  | | | | | | |/ _` |  |
+  | | |_| | | | (_| |  |  Version 1.0.0 (2018-08-08)
+ _/ |\__'_|_|_|\__'_|  |  Official https://julialang.org/ release
+|__/                   |
+
+julia>
+

For a listing of command line options:

$ julia --help
+
+julia [switches] -- [programfile] [args...]
+ -v, --version             Display version information
+ -h, --help                Print this message
+
+ -J, --sysimage <file>     Start up with the given system image file
+ -H, --home <dir>          Set location of `julia` executable
+ --startup-file={yes|no}   Load `~/.julia/config/startup.jl`
+ --handle-signals={yes|no} Enable or disable Julia's default signal handlers
+ --sysimage-native-code={yes|no}
+                           Use native code from system image if available
+ --compiled-modules={yes|no}
+                           Enable or disable incremental precompilation of modules
+
+ -e, --eval <expr>         Evaluate <expr>
+ -E, --print <expr>        Evaluate <expr> and display the result
+ -L, --load <file>         Load <file> immediately on all processors
+
+ -p, --procs {N|auto}      Integer value N launches N additional local worker processes
+                           "auto" launches as many workers as the number
+                           of local CPU threads (logical cores)
+ --machine-file <file>     Run processes on hosts listed in <file>
+
+ -i                        Interactive mode; REPL runs and isinteractive() is true
+ -q, --quiet               Quiet startup: no banner, suppress REPL warnings
+

Running a Julia script#

A Julia program is easy to run on the command line outside of its interactive mode.

Here is an example where we create a simple Hello World program and launch it with Julia:

$ echo 'println("hello world")' > helloworld.jl
+

That script can now simply be executed by calling julia <script_name>:

$ julia helloworld.jl
+hello world
+

Submitting a Julia job#

Here's an example Julia sbatch script that can be submitted via sbatch:

#!/bin/bash
+
+#SBATCH --time=00:10:00
+#SBATCH --mem=4G
+#SBATCH --output=julia_test.log
+
+# load the module
+ml julia
+
+# run the Julia application
+julia helloworld.jl
+

You can save this script as julia_test.sbatch and submit it to the scheduler with:

$ sbatch julia_test.sbatch
+

Once the job is done, you should get a julia_test.log file in the current directory, with the following contents:

$ cat julia_test.log
+hello world
+

Julia packages#

Julia provides an ever-growing list of packages that can be installed to add functionality to your Julia code.

Installing packages with Julia is very simple. Julia includes a package module in its base installation that handles installing, updating, and removing packages.

First import the Pkg module:

julia> import Pkg
+julia> Pkg.status()
+    Status `~/.julia/environments/v1.0/Project.toml`
+

Julia packages only need to be installed once

You only need to install Julia packages once on Sherlock. Since filesystems are shared, packages installed on one node will immediately be available on all nodes of the cluster.

Installing packages#

You can first check the status of the packages installed in your Julia environment using the status function of the Pkg module:

julia> Pkg.status()
+No packages installed.
+

You can then add packages using the add function of the Pkg module:

julia> Pkg.add("Distributions")
+INFO: Cloning cache of Distributions from git://github.com/JuliaStats/Distributions.jl.git
+INFO: Cloning cache of NumericExtensions from git://github.com/lindahua/NumericExtensions.jl.git
+INFO: Cloning cache of Stats from git://github.com/JuliaStats/Stats.jl.git
+INFO: Installing Distributions v0.2.7
+INFO: Installing NumericExtensions v0.2.17
+INFO: Installing Stats v0.2.6
+INFO: REQUIRE updated.
+

Using the status function again, you can see that the package and its dependencies have been installed:

julia> Pkg.status()
+Required packages:
+ - Distributions                 0.2.7
+Additional packages:
+ - NumericExtensions             0.2.17
+ - Stats                         0.2.6
+

Updating Packages#

The update function of the Pkg module can update all packages installed:

julia> Pkg.update()
+INFO: Updating METADATA...
+INFO: Computing changes...
+INFO: Upgrading Distributions: v0.2.8 => v0.2.10
+INFO: Upgrading Stats: v0.2.7 => v0.2.8
+

Removing packages#

The remove function of the Pkg module can remove any packages installed as well:

julia> Pkg.rm("Distributions")
+INFO: Removing Distributions v0.2.7
+INFO: Removing Stats v0.2.6
+INFO: Removing NumericExtensions v0.2.17
+INFO: REQUIRE updated.
+
+julia> Pkg.status()
+Required packages:
+ - SHA                           0.3.2
+
+julia> Pkg.rm("SHA")
+INFO: Removing SHA v0.3.2
+INFO: REQUIRE updated.
+
+julia> Pkg.status()
+No packages installed.
+

Examples#

Parallel job#

Julia can natively spawn parallel workers across multiple compute nodes, without using MPI. There are two main modes of operation:

  1. ClusterManager: in this mode, you can spawn workers from within the Julia interpreter, and each worker will actually submit jobs to the scheduler, executing instructions within those jobs.

  2. using the --machine-file option: here, you submit a SLURM job and run the Julia interpreter in parallel mode within the job's resources.

The second mode is easier to use, and more convenient, since you have all your resources available and ready to use when the job starts. In mode 1, you'll need to wait for jobs to be dispatched and executed inside Julia.

Here is a quick example on how to use the --machine-file option on Sherlock.

Given the following Julia script (julia_parallel_test.jl) that will print a line with the process id and the node it's executing on, in parallel:

using Distributed
+@everywhere println("process: $(myid()) on host $(gethostname())")
+

You can submit the following job:

#!/bin/bash
+#SBATCH --nodes 2
+#SBATCH --ntasks-per-node 4
+#SBATCH --time 5:0
+
+ml julia
+julia --machine-file <(srun hostname -s)  ./julia_parallel_test.jl
+

Save as julia_test.sbatch, and then:

$ sbatch  julia_test.sbatch
+

It will:

  1. Request 2 nodes, 4 tasks per node (8 tasks total)
  2. load the julia module
  3. Run Julia in parallel with a machine file that is automatically generated, listing the nodes that are assigned to your job.

It should output something like this in your job's output file:

process: 1 on host sh-06-33.int
+      From worker 2:    process: 2 on host sh-06-33.int
+      From worker 3:    process: 3 on host sh-06-34.int
+      From worker 5:    process: 5 on host sh-06-33.int
+      From worker 4:    process: 4 on host sh-06-33.int
+      From worker 6:    process: 6 on host sh-06-33.int
+      From worker 8:    process: 8 on host sh-06-34.int
+      From worker 9:    process: 9 on host sh-06-34.int
+      From worker 7:    process: 7 on host sh-06-34.int
+
\ No newline at end of file diff --git a/docs/software/using/mariadb/index.html b/docs/software/using/mariadb/index.html new file mode 100644 index 000000000..014dd4a8f --- /dev/null +++ b/docs/software/using/mariadb/index.html @@ -0,0 +1,97 @@ + MariaDB - Sherlock

MariaDB

Introduction#

MariaDB is a community-developed fork of the MySQL relational database management system. It is completely compatible with MySQL and can be used as a drop-in replacement in the vast majority of cases.

More documentation#

The following documentation is specifically intended for using MariaDB on Sherlock. For more complete documentation about MariaDB in general, please see the MariaDB documentation.

MariaDB on Sherlock#

We don't provide any centralized database service on Sherlock, but we provide a centralized installation of MariaDB, and each user is welcome to start their own instance of the database server to fit their jobs' needs.

The overall process to run an instance of MariaDB on Sherlock would look like this:

  1. configure and initialize your environment so you can start a database instance under your user account,
  2. start the database server,
  3. run SQL queries from the same node (via a local socket), or from other nodes and/or jobs (via the network).

Single-node access#

In that example, the database server and client will run within the same job, on the same compute node.

Preparation#

You first need to let MariaDB know where to store its database, where to log things, and how to allow connections from clients. The commands below only need to be executed once.

For this, you'll need to create a .my.cnf file in your home directory. Assuming you'll want to store your database files in a db/ directory in your $SCRATCH folder, you can run the following commands:

$ export DB_DIR=$SCRATCH/db
+$ mkdir $DB_DIR
+
+$ cat << EOF > ~/.my.cnf
+[mysqld]
+datadir=$DB_DIR
+socket=$DB_DIR/mariadb.sock
+user=$USER
+symbolic-links=0
+skip-networking
+
+[mysqld_safe]
+log-error=$DB_DIR/mariadbd.log
+pid-file=$DB_DIR/mariadbd.pid
+
+[mysql]
+socket=$DB_DIR/mariadb.sock
+EOF
+

.my.cnf doesn't support environment variables

Please note that if you edit your ~/.my.cnf file directly in a file editor, without using the HEREDOC syntax above, environment variables such as $DB_DIR, $HOME or $USER won't work: you will need to specify absolute paths explicitly, such as /scratch/users/kilian/db/mariadbd.log.

If you use the HEREDOC syntax, you can verify that the resulting .my.cnf file does actually contain full paths, and not environment variables anymore.
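
For instance, a quick way to check is to look at the datadir entry (the path shown will reflect your own $SCRATCH location):

$ grep datadir ~/.my.cnf
+datadir=/scratch/users/kilian/db
+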

Once you have the .my.cnf file in place, you need to initialize your database with some internal data that MariaDB needs. In the same terminal, run the following commands:

$ ml system mariadb
+$ $MARIADB_DIR/scripts/mysql_install_db --basedir=$MARIADB_DIR  --datadir=$DB_DIR
+

Start the server#

You can now start the MariaDB server. For this, first get an allocation on a compute node, note the hostname of the compute node your job has been allocated, load the mariadb module, and then run the mysqld_safe process:

$ srun --pty bash
+$ echo $SLURM_JOB_NODELIST
+sh-01-01
+$ ml system mariadb
+$ mysqld_safe
+180705 18:14:27 mysqld_safe Logging to '/home/users/kilian/db/mysqld.log'.
+180705 18:14:28 mysqld_safe Starting mysqld daemon with databases from /home/users/kilian/db/
+

The mysqld_safe process will be blocking, meaning it will not give the prompt back for as long as the MariaDB server runs.

If it does return on its own, it probably means that something went wrong, and you'll find more information about the issue in the $DB_DIR/mariadbd.log file you defined in ~/.my.cnf.

Run queries#

You're now ready to run queries against that MariaDB instance, from the same node your job is running on.

From another terminal on Sherlock, connect to your job's compute node (here, it's sh-01-01, as shown above), load the mariadb module, and then run the mysql command: it will open the MariaDB shell, ready to run your SQL queries:

$ ssh sh-01-01
+$ ml system mariadb
+$ mysql
+Welcome to the MariaDB monitor.  Commands end with ; or \g.
+Your MariaDB connection id is 8
+Server version: 10.2.11-MariaDB Source distribution
+
+Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+MariaDB [(none)]>
+
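
From that prompt, you can run any SQL statement you like. For instance, a quick smoke test could look like this:

MariaDB [(none)]> CREATE DATABASE test_db;
+Query OK, 1 row affected (0.001 sec)
+
+MariaDB [(none)]> USE test_db;
+Database changed
+MariaDB [test_db]>
+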

Once you're done with your MariaDB instance, you can just terminate your job, and all the processes will be terminated automatically.

Multi-node access#

In case you need to run a more persistent instance of MariaDB, you can for instance submit a dedicated job to run the server, make it accessible over the network, and run queries from other jobs and/or nodes.

Enable network access#

The preparation steps are pretty similar to the single-node case, except the MariaDB server instance will be accessed over the network rather than through a local socket.

Network access must be secured

When running a networked instance of MariaDB, please keep in mind that any user on Sherlock will be able to connect to the TCP ports that mysqld runs on, and that proper configuration must be done to prevent unauthorized access.

Like in the single-node case, you need to create a ~/.my.cnf file, but without the skip-networking directive.

$ export DB_DIR=$SCRATCH/db
+$ mkdir $DB_DIR
+
+$ cat << EOF > ~/.my.cnf
+[mysqld]
+datadir=$DB_DIR
+socket=$DB_DIR/mariadb.sock
+user=$USER
+symbolic-links=0
+
+[mysqld_safe]
+log-error=$DB_DIR/mariadbd.log
+pid-file=$DB_DIR/mariadbd.pid
+
+[mysql]
+socket=$DB_DIR/mariadb.sock
+EOF
+

And then initialize the database:

$ ml system mariadb
+$ $MARIADB_DIR/scripts/mysql_install_db --basedir=$MARIADB_DIR  --datadir=$DB_DIR
+

Secure access#

We will now set the password of the MariaDB root user to a random string, just for the purpose of preventing unauthorized access, since we won't need it for anything.

We will then create a MariaDB user with all privileges on the databases, who will be able to connect to this instance from any node. This user will need a real password, though, so please make sure to replace the my-secure-password string below with the actual password of your choice.

Choose a proper password

This password will only be used to access this specific instance of MariaDB. Note that anybody knowing that password will be allowed to connect to your MariaDB instances and modify data in the tables.

  • do NOT literally use my-secure-password
  • do NOT use your SUNet ID password

Once you've chosen your password, you can start the mysqld process on a compute node, like before:

$ srun --pty bash
+$ echo $SLURM_JOB_NODELIST
+sh-01-01
+$ ml system mariadb
+$ mysqld_safe
+

And then, from another terminal, run the following commands to secure access to your MariaDB database.

$ ssh sh-01-01
+$ mysql -u root << EOF
+UPDATE mysql.user SET Password=PASSWORD(RAND()) WHERE User='root';
+DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');
+DELETE FROM mysql.user WHERE User='';
+DELETE FROM mysql.db WHERE Db='test' OR Db='test_%';
+GRANT ALL PRIVILEGES ON *.* TO '$USER'@'%' IDENTIFIED BY 'my-secure-password' WITH GRANT OPTION;
+FLUSH PRIVILEGES;
+EOF
+

Once you've done that, you're ready to terminate that interactive job, and start a dedicated MariaDB server job.

Start MariaDB in a job#

You can use the following mariadb.sbatch job as a template:

#!/bin/bash
+
+#SBATCH --job-name=mariadb
+#SBATCH --time=8:0:0
+#SBATCH --dependency=singleton
+
+ml system mariadb
+mysqld_safe
+

and submit it with:

$ sbatch mariadb.sbatch
+

Concurrent instances will lead to data corruption

An important thing to keep in mind is that having multiple instances of a MariaDB server running at the same time, using the same database files, will certainly lead to catastrophic situations and the corruption of those files.

To prevent this from happening, the --dependency=singleton job submission option will make sure that only one instance of that job (based on its name and user) will run at any given time.

Connect to the running instance#

Now, from any node on Sherlock, whether from a login node, an interactive job, or a batch job, using the mysql CLI or any application binding in any language, you should be able to connect to your running MariaDB instance.

First, identify the node your job is running on with squeue:

$ squeue -u $USER -n mariadb
+             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+          21383445    normal  mariadb   kilian  R       0:07      1 sh-01-02
+

and then, point your MariaDB client to that node:

$ ml system mariadb
+$ mysql -h sh-01-02 -p
+Enter password:
+Welcome to the MariaDB monitor.  Commands end with ; or \g.
+Your MariaDB connection id is 15
+Server version: 10.2.11-MariaDB Source distribution
+
+Copyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+MariaDB [(none)]>
+

That's it! You can now run SQL queries from anywhere on Sherlock to your own MariaDB instance.

Persistent DB instances#

SQL data is persistent

All the data you import in your SQL databases will be persistent across jobs: you can run a MariaDB server job for the day, import data in its database, stop the job, and resubmit the same MariaDB server job the next day, and all your data will still be there, as long as the location you've chosen for your database (the $DB_DIR defined in the Preparation steps) is on a persistent storage location.

If you need database access for more than the maximum runtime of a job, you can use the instructions provided to define self-resubmitting recurring jobs and submit long-running database instances.
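
As a sketch of the idea (a simplified version of that technique; the names and times are example values), the server job can resubmit itself when it starts, and the singleton dependency will keep the new copy pending until the current one ends:

#!/bin/bash
+
+#SBATCH --job-name=mariadb
+#SBATCH --time=8:0:0
+#SBATCH --dependency=singleton
+
+# resubmit this job: thanks to the singleton dependency, the new copy
+# will stay pending until the current instance reaches its time limit
+sbatch "$0"
+
+ml system mariadb
+mysqld_safe
+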

\ No newline at end of file diff --git a/docs/software/using/matlab/index.html b/docs/software/using/matlab/index.html new file mode 100644 index 000000000..028b31493 --- /dev/null +++ b/docs/software/using/matlab/index.html @@ -0,0 +1,78 @@ + Matlab - Sherlock

Matlab

Introduction#

MATLAB is a numerical computing environment and proprietary programming language developed by MathWorks.

More documentation#

The following documentation is specifically intended for using Matlab on Sherlock. For more complete documentation about Matlab in general, please see the official MATLAB documentation.

MATLAB on Sherlock#

Licensing#

MATLAB is a commercial software suite, which is now available at no cost to all Stanford faculty, students, and staff.

Note: a number of free, open-source alternatives exist and can be used in many situations: Octave, R, Julia, or Python are all available on Sherlock, and can often replace MATLAB with good results.

Using MATLAB#

The MATLAB module can be loaded with:

$ ml load matlab
+

This will load the current default version. For a list of available versions run ml spider matlab at the Sherlock prompt, or refer to the Software list page.

MATLAB can't run on login nodes

Running MATLAB directly on login nodes is not supported and will produce the following message:

-----------------------------------------------------------------------
+WARNING: running MATLAB directly on login nodes is not supported.  Please
+make sure you request an interactive session on a compute node (with "sh_dev"
+for instance) before launching MATLAB interactively.
+-----------------------------------------------------------------------
+
You will need to submit a job or request an interactive session on a compute node before you can start MATLAB.

Once you are on a compute node and your environment is configured (i.e. when the matlab module is loaded), MATLAB can be started by simply typing matlab at the shell prompt.

$ sh_dev
+$ ml load matlab
+$ matlab
+MATLAB is selecting SOFTWARE OPENGL rendering.
+                          < M A T L A B (R) >
+                Copyright 1984-2019 The MathWorks, Inc.
+                R2019a (9.6.0.1072779) 64-bit (glnxa64)
+                             March 8, 2019
+
+To get started, type doc.
+For product information, visit www.mathworks.com.
+
+>>
+

For a listing of command line options:

$ matlab -help
+

Running a MATLAB script#

There are several ways to launch a MATLAB script on the command line, as documented in the MATLAB documentation:

Method Output
matlab -nodesktop < script.m MATLAB will run the code from script.m and display output on stdout
matlab -nodisplay Start MATLAB in CLI mode, without its graphical desktop environment
matlab -nojvm do not start the JVM1

MATLAB GUI#

It's often best to use your laptop or desktop to develop and debug MATLAB code, and to visualize its output. If you do need to use the MATLAB GUI on a large cluster like Sherlock, you will need to enable X11 forwarding in your SSH client.

For instance:

$ ssh -X <YourSUNetID>@login.sherlock.stanford.edu
+

And then, once on Sherlock:

$ sh_dev
+$ ml load matlab
+$ matlab
+

For more info on X11 forwarding, you can refer to this UIT page.

Examples#

Simple MATLAB job#

Here is an example MATLAB batch script that can be submitted with sbatch:

#!/bin/bash
+#SBATCH --job-name=matlab_test
+#SBATCH --output=matlab_test."%j".out
+#SBATCH --error=matlab_test."%j".err
+#SBATCH --partition=normal
+#SBATCH --time=00:10:00
+#SBATCH --cpus-per-task=1
+#SBATCH --mem=8G
+#SBATCH --mail-type=ALL
+
+module load matlab
+matlab -nodisplay < example.m
+

This simple job, named matlab_test, will run a MATLAB script named example.m in the normal partition, for a duration of 10 minutes, using 1 CPU and 8 GB of RAM. It will send you an email (to whatever address you used when you signed up for Sherlock) when it begins, ends or fails.

Additionally, to aid in debugging, it will log any errors and output to the files matlab_test.JOBID.{out,err} with the jobid appended to the filename (%j).

To create the script, open a text editor on Sherlock, copy the contents of the script, and save it as matlab_test.sbatch.

Then, submit the job with the sbatch command:

$ sbatch matlab_test.sbatch
+Submitted batch job 59942277
+

You can check the status of the job with the squeue command, and check the contents of the matlab_test.JOBID.{out,err} files to see the results.
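
For instance (the output below is purely illustrative):

$ squeue -u $USER -n matlab_test
+       JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+    59942277    normal matlab_t   kilian  R       0:42      1 sh-01-23
+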

Parallel loop#

You can run your MATLAB code across multiple CPUs on Sherlock using parfor loops, to take advantage of the multiple CPU cores that each node features. A single job can request as many CPUs as there are on a node. The key is to grab the SLURM environment variable $SLURM_CPUS_PER_TASK and create the worker pool in your MATLAB code with:

parpool('local', str2num(getenv('SLURM_CPUS_PER_TASK')))
+

Here is an example of a sbatch submission script that requests 16 CPUs on a node, and runs a simple MATLAB script using parfor.

Save the two scripts below as parfor.sbatch and parfor_loop.m:

#!/bin/bash
+#SBATCH -J pfor_matlab
+#SBATCH -o pfor".%j".out
+#SBATCH -e pfor".%j".err
+#SBATCH -t 20:00
+#SBATCH -p normal
+#SBATCH -c 16
+#SBATCH --mail-type=ALL
+
+module load matlab
+matlab -batch parfor_loop
+
%============================================================================
+% Parallel Monte Carlo calculation of PI
+%============================================================================
+parpool('local', str2num(getenv('SLURM_CPUS_PER_TASK')))
+R = 1;
+darts = 1e7;
+count = 0;
+tic
+parfor i = 1:darts
+   % Compute the X and Y coordinates of where the dart hit the...............
+   % square using Uniform distribution.......................................
+   x = R*rand(1);
+   y = R*rand(1);
+   if x^2 + y^2 <= R^2
+      % Increment the count of darts that fell inside of the.................
+      % circle...............................................................
+     count = count + 1; % Count is a reduction variable.
+   end
+end
+% Compute pi.................................................................
+myPI = 4*count/darts;
+T = toc;
+fprintf('The computed value of pi is %8.7f.\n', myPI);
+fprintf('The parallel Monte-Carlo method is executed in %8.2f seconds.\n', T);
+delete(gcp);
+exit;
+

You can now submit the job with the following command:

sbatch parfor.sbatch
+

If you run htop or pstree -u $USER on the compute node that is running your job, you will see all 16 cores allocated to your MATLAB code.

You can also try that same job with different numbers of CPUs, and see how well it scales.


  1. MATLAB uses the Java® Virtual Machine (JVM™) software to run the desktop and to display graphics. The -nojvm option enables you to start MATLAB without the JVM. Using this option minimizes memory usage and improves initial start-up speed, but restricts functionality. 

\ No newline at end of file diff --git a/docs/software/using/perl/index.html b/docs/software/using/perl/index.html new file mode 100644 index 000000000..7fb49d7b3 --- /dev/null +++ b/docs/software/using/perl/index.html @@ -0,0 +1,6 @@ + Perl - Sherlock

Perl

Introduction#

Perl is a high-level, general-purpose, interpreted, dynamic programming language. Originally developed by Larry Wall in 1987 as a general-purpose Unix scripting language to make report processing easier, it has since undergone many changes and revisions.

Perl provides a framework allowing users to easily extend the language by installing new modules in their local environment. The Comprehensive Perl Archive Network (CPAN1) is an archive of over 25,000 distributions of software written in Perl, as well as documentation for it. It is searchable at http://metacpan.org or http://search.cpan.org and mirrored in over 270 locations around the world.

More documentation#

The following documentation is specifically intended for using Perl on Sherlock. For more complete documentation about Perl in general, please see the Perl documentation.

Perl modules on Sherlock#

To install Perl modules from CPAN, we recommend using the provided App::cpanminus and local::lib modules:

  • App::cpanminus is a popular alternative CPAN client that can be used to manage Perl distributions. It has many great features, including uninstalling modules.
  • local::lib allows users to install Perl modules in the directory of their choice (typically their home directory) without administrative privileges.

Both are already installed on Sherlock, and are automatically enabled and configured when you load the perl module. You don't need to add anything to your ~/.bashrc file: the Sherlock perl module automatically sets up everything that is required, so you can directly run a command to install Perl modules locally.
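
For instance, right after loading the perl module, cpanm should already be available in your $PATH (the version shown below is just an example):

$ ml perl
+$ cpanm --version
+cpanm (App::cpanminus) version 1.7044
+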

Installation#

Perl modules installation is only necessary once

You only need to install Perl modules once on Sherlock. Since filesystems are shared, modules installed on one node will immediately be available on all nodes of the cluster.

As an example, to install the DateTime::TimeZone module, you can do the following:

$ ml perl
+$ cpanm DateTime::TimeZone
+

Usage#

Once installed, you can use the Perl modules directly; no specific options or syntax are required.

For instance, to check that the DateTime::TimeZone module is correctly installed:

$ perl -MDateTime::TimeZone -e 'print $DateTime::TimeZone::VERSION . "\n"';
+2.13
+

Uninstallation#

To uninstall a Perl module:

$ cpanm -U DateTime::TimeZone
+

  1. CPAN can denote either the archive network itself, or the Perl program that acts as an interface to the network and as an automated software installer (somewhat like a package manager). Most software on CPAN is free and open source. 

\ No newline at end of file diff --git a/docs/software/using/postgresql/index.html b/docs/software/using/postgresql/index.html new file mode 100644 index 000000000..e63fed694 --- /dev/null +++ b/docs/software/using/postgresql/index.html @@ -0,0 +1,63 @@ + PostgreSQL - Sherlock

PostgreSQL

Introduction#

PostgreSQL is a powerful, open source object-relational database system with a strong focus on reliability, feature robustness, and performance.

More documentation#

The following documentation is specifically intended for using PostgreSQL on Sherlock. For more complete documentation about PostgreSQL in general, please see the PostgreSQL documentation.

PostgreSQL on Sherlock#

We don't provide any centralized database service on Sherlock, but we provide a centralized installation of PostgreSQL, and each user is welcome to start their own instance of the database server to fit their jobs' needs.

The overall process to run an instance of PostgreSQL on Sherlock would look like this:

  1. configure and initialize your environment so you can start a database instance under your user account,
  2. start the database server,
  3. run SQL queries from the same node (via a local socket), or from other nodes and/or jobs (via the network).

Single-node access#

In that example, the database server and client will run within the same job, on the same compute node.

Preparation#

You first need to let PostgreSQL know where to store its database. The commands below only need to be executed once.

Assuming you'll want to store your database files in a db/ directory in your $SCRATCH folder, you can run the following commands:

$ export DB_DIR=$SCRATCH/db
+$ mkdir $DB_DIR
+

Once you have your $DB_DIR in place, you need to initialize your database with some internal data that PostgreSQL needs. In the same terminal, run the following commands:

$ ml system postgresql
+$ initdb $DB_DIR
+

Start the server#

You can now start the PostgreSQL server. For this, first get an allocation on a compute node, note the hostname of the compute node your job has been allocated, load the postgresql module, and then run the postgres server:

$ srun --pty bash
+$ echo $SLURM_JOB_NODELIST
+sh-01-01
+$ ml system postgresql
+$ export DB_DIR=$SCRATCH/db
+$ postgres -D $DB_DIR
+[...]
+2018-10-09 17:42:08.094 PDT [3841] LOG:  database system is ready to accept connections
+

The postgres process will be blocking, meaning it will not give the prompt back for as long as the PostgreSQL server runs.

Run queries#

You're now ready to run queries against that PostgreSQL instance, from the same node your job is running on.

From another terminal on Sherlock, connect to your job's compute node (here, it's sh-01-01, as shown above), load the postgresql module, and then run the createdb command: it will create a database that you can use as a testbed:

$ ssh sh-01-01
+$ ml system postgresql
+$ createdb test_db
+

Once this is done, from the same shell, you can run the psql command, which will open the PostgreSQL shell, ready to run your SQL queries:

$ psql test_db
+psql (10.5)
+Type "help" for help.
+
+test_db=#
+

Once you're done with your PostgreSQL instance, you can just terminate your job, and all the processes will be terminated automatically.
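Alternatively, you can shut the server down cleanly before releasing the job, using the same pg_ctl command shown in the multi-node section below (assuming $DB_DIR is still set in that shell):

$ ml system postgresql
+$ pg_ctl stop -D $DB_DIR
+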

Multi-node access#

In case you need to run a more persistent instance of PostgreSQL, you can for instance submit a dedicated job to run the server, make it accessible over the network, and run queries from other jobs and/or nodes.

Enable network access#

The preparation steps are pretty similar to the single-node case, except the PostgreSQL server instance will be accessed over the network rather than through a local socket.

Network access must be secured

When running a networked instance of PostgreSQL, please keep in mind that any user on Sherlock could potentially connect to the TCP ports that postgres listens on, and that proper configuration must be done to prevent unauthorized access.

Like in the single-node case, you need to start the postgres server process, but with the -i option to enable network connections, and define user access in your $DB_DIR/pg_hba.conf file (see below).

Secure access#

To allow network connections to the database server, a password will need to be defined for the PostgreSQL user. This will allow that user to connect to the PostgreSQL instance from any node. Please make sure to replace the my-secure-password string below with the actual password of your choice.

Choose a proper password

This password will only be used to access this specific instance of PostgreSQL. Note that anybody knowing that password will be allowed to connect to your PostgreSQL instance and modify data in its tables.

  • do NOT use my-secure-password
  • do NOT use your SUNet ID password

Once you've chosen your password, you can now start the PostgreSQL server on a compute node, as described in the previous section, initialize the database, and set the user password:

$ srun --pty bash
+
+$ echo $SLURM_JOB_NODELIST
+sh-01-01
+$ export DB_DIR=$SCRATCH/db
+$ mkdir $DB_DIR
+
+$ ml system postgresql
+$ initdb $DB_DIR
+$ pg_ctl start -D $DB_DIR
+$ createdb test_db
+
+$ psql -c "ALTER USER $USER PASSWORD 'my-secure-password';" test_db
+

Then, we need to edit the $DB_DIR/pg_hba.conf file to allow network access for user $USER:

$ cat << EOF > $DB_DIR/pg_hba.conf
+local   all             all                                     trust
+host    all             all             127.0.0.1/32            trust
+host    all             all             ::1/128                 trust
+host    all             $USER           samenet                 md5
+EOF
+

Once you've done that, you're ready to terminate that interactive job, and start a dedicated PostgreSQL server job.

$ pg_ctl stop -D $DB_DIR
+$ logout
+

Start PostgreSQL in a job#

You can use the following postgresql.sbatch job as a template:

#!/bin/bash
+
+#SBATCH --job-name=postgresql
+#SBATCH --time=8:0:0
+#SBATCH --dependency=singleton
+
+export DB_DIR=$SCRATCH/db
+
+ml system postgresql
+
+postgres -i -D $DB_DIR
+

and submit it with:

$ sbatch postgresql.sbatch
+

Concurrent instances will lead to data corruption

An important thing to keep in mind is that having multiple instances of a PostgreSQL server running at the same time, using the same database files, will certainly lead to catastrophic situations and the corruption of those files.

To prevent this from happening, the --dependency=singleton job submission option will make sure that only one instance of that job (based on its name and user) will run at any given time.

Connect to the running instance#

Now, from any node on Sherlock, whether from a login node, an interactive job, or a batch job, using the psql CLI or any application binding in any language, you should be able to connect to your running PostgreSQL instance.

First, identify the node your job is running on with squeue:

$ squeue -u $USER -n postgresql
+             JOBID PARTITION       NAME     USER ST       TIME  NODES NODELIST(REASON)
+          21383445    normal postgresql   kilian  R       0:07      1 sh-01-02
+

and then, point your PostgreSQL client to that node:

$ ml system postgresql
+$ psql -h sh-01-02 test_db
+Password:
+psql (10.5)
+Type "help" for help.
+
+test_db=#
+

That's it! You can now run SQL queries from anywhere on Sherlock to your own PostgreSQL instance.
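For instance, to run a quick one-off query non-interactively, you can pass it to psql with the -c option (using the node and database from the example above):

$ psql -h sh-01-02 -c "SELECT version();" test_db
+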

Persistent DB instances#

SQL data is persistent

All the data you import in your SQL databases will be persistent across jobs, meaning that you can run a PostgreSQL server job for the day, import data in its database, stop the job, and resubmit the same PostgreSQL server job the next day: all your data will still be there, as long as the location you've chosen for your database (the $DB_DIR defined in the Preparation steps) is on a persistent storage location.

If you need database access for more than the maximum runtime of a job, you can use the instructions provided to define self-resubmitting recurring jobs and submit long-running database instances.
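As a hedged sketch of that approach (the exact resubmission options are described in the recurring jobs documentation), the postgresql.sbatch script above could resubmit itself before starting the server, so that a new 8-hour instance is queued as soon as the current one ends:

#!/bin/bash
+
+#SBATCH --job-name=postgresql
+#SBATCH --time=8:0:0
+#SBATCH --dependency=singleton
+
+# queue a follow-up copy of this same job script;
+# the singleton dependency ensures it only starts after this one ends
+sbatch --begin=now+8hours "$0"
+
+export DB_DIR=$SCRATCH/db
+
+ml system postgresql
+
+postgres -i -D $DB_DIR
+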

\ No newline at end of file diff --git a/docs/software/using/python/index.html b/docs/software/using/python/index.html new file mode 100644 index 000000000..fa4a533c2 --- /dev/null +++ b/docs/software/using/python/index.html @@ -0,0 +1,63 @@ + Python - Sherlock

Python

Introduction#

Python is an interpreted high-level programming language for general-purpose programming. Its design philosophy emphasizes code readability. It provides constructs that enable clear programming on both small and large scales, which makes it both easy to learn and very well-suited for rapid prototyping.

More documentation#

The following documentation is specifically intended for using Python on Sherlock. For more complete documentation about Python in general, please see the Python documentation.

Python on Sherlock#

Sherlock features multiple versions of Python.

Some applications only work with legacy features of version 2.x, while more recent code will require specific version 3.x features. Modules on Sherlock may only be available in a single flavor (as denoted by their suffix: _py27 or _py36), because the application only supports one or the other.

You can load either version on Sherlock with the following commands:

$ ml python/2.7.13
+

or

$ ml python/3.6.1
+

The Python3 interpreter is python3

The Python3 executable is named python3, not python. So, once you have the "python/3.6.1" module loaded on Sherlock, you will need to use python3 to invoke the proper interpreter. python will still refer to the default, older system-level Python installation, and may result in errors when trying to run Python3 code.

This is an upstream decision detailed in PEP-394, not something specific to Sherlock.

Using Python#

Once your environment is configured (ie. when the Python module is loaded), Python can be started by simply typing python at the shell prompt:

$ python
+Python 2.7.13 (default, Apr 27 2017, 14:19:21)
+[GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+>>>
+

Python in batch jobs#

Python output is buffered by default

By default, Python buffers console output. This means that when running Python in a batch job through Slurm, you may see output less often than you would when running interactively.

When output is being buffered, the print statements are aggregated until there is enough data to print, and then the messages are all printed at once. As a consequence, job output files (as specified with the --output and --error job submission options) will be refreshed less often and may give the impression that the job is not running.

For debugging or checking that a Python script is producing the correct output, you may want to switch off buffering.

Switching off buffering#

For a single python script you can use the -u option, as in python -u my_script.py. The -u option stands for "unbuffered".

For instance:

#!/bin/bash
+#SBATCH -n 1
+
+python -u my_script.py
+

Tip

You can also use the environment variable PYTHONUNBUFFERED to set unbuffered I/O for your whole batch script.

#!/bin/bash
+#SBATCH -n 1
+
+export PYTHONUNBUFFERED=True
+python my_script.py
+

NB: There is some performance penalty for having unbuffered print statements, so you may want to reduce the number of print statements, or run buffered for production runs.

Python packages#

The capabilities of Python can be extended with packages developed by third parties. In general, to simplify operations, it is left up to individual users and groups to install these third-party packages in their own directories. However, Sherlock provides tools to help you install the third-party packages that you need.

Among many others, common Python packages such as NumPy and TensorFlow are provided as modules on Sherlock.

Python modules on Sherlock generally follow the naming scheme below:

py-<package_name>/version_py<python_version>
+

For instance, following that scheme, NumPy modules are named py-numpy/<version>_py27 for Python 2 and py-numpy/<version>_py36 for Python 3.

You can list all available module versions for a package with ml spider <package_name>. For instance:

$ ml spider tensorflow
+-------------------------------------------------------------------------------
+  py-tensorflow:
+-------------------------------------------------------------------------------
+    Description:
+      TensorFlow™ is an open source software library for numerical computation using data flow graphs.
+
+     Versions:
+        py-tensorflow/1.6.0_py27
+        py-tensorflow/1.6.0_py36
+        py-tensorflow/1.7.0_py27
+        py-tensorflow/1.9.0_py27
+        py-tensorflow/1.9.0_py36
+

Dependencies are handled automatically

When you decide to use NumPy on Sherlock, you just need to load the py-numpy module of your choice, and the correct Python interpreter will be loaded automatically. No need to load a python module explicitly.
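For instance, here's a quick hedged check that a loaded module provides a working NumPy (this assumes a Python 2 flavor is the default; use python3 for _py36 modules, and the version printed will depend on the module loaded):

$ ml py-numpy
+$ python -c 'import numpy; print(numpy.__version__)'
+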

Installing packages#

If you need to use a Python package that is not already provided as a module on Sherlock, you can use the pip command. This command takes care of compiling and installing most of Python packages and their dependencies. All of pip's commands and options are explained in detail in the Pip user guide.

A comprehensive index of Python packages can be found at PyPI.

To install Python packages with pip, you'll need to use the --user option. This will make sure that those packages are installed in a user-writable location (by default, your $HOME directory). Since your $HOME directory is shared across nodes on Sherlock, you'll only need to install your Python packages once, and they'll be ready to be used on every single node in the cluster.

For example:

$ pip install --user <package_name>
+

For Python 3, use pip3:

$ pip3 install --user <package_name>
+

Python packages will be installed in $HOME/.local/lib/python<version>/site-packages, meaning that packages for Python 2.x and Python 3.x will be kept separate. This means that they won't interfere with each other, but also that if you need to use a package with both Python 2.x and 3.x, you'll need to install it twice, once for each Python version.

List installed packages#

You can easily see the list of the Python packages installed in your environment, and their location, with pip list:

$ pip list -v
+Package    Version Location                                                            Installer
+---------- ------- ------------------------------------------------------------------- ---------
+pip        18.1    /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip
+setuptools 28.8.0  /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip
+urllib3    1.24    /home/users/kilian/.local/lib/python2.7/site-packages               pip
+virtualenv 15.1.0  /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip
+
Alternative installation path#

Python paths

While theoretically possible, installing Python packages in alternate locations can be tricky, so we recommend sticking to the pip install --user approach whenever possible. But in case you absolutely need it, we provide some guidelines below.

One common case of needing to install Python packages in alternate locations is to share those packages with a group of users. Here's an example that will show how to install the urllib3 Python package in a group-shared location and let users from the group use it without having to install it themselves.

First, you need to create a directory to store those packages. We'll put it in $GROUP_HOME:

$ mkdir -p $GROUP_HOME/python/
+

Then, we load the Python module we need, and we instruct pip to install its packages in the directory we just created:

$ ml python/2.7.13
+$ PYTHONUSERBASE=$GROUP_HOME/python pip install --user urllib3
+

We still use the --user option, but with PYTHONUSERBASE pointing to a different directory, pip will install packages there.

Now, to be able to use that Python module, since it's not been installed in a default directory, you (and all the members of the group who want to use that module) will need to set your PYTHONPATH to include our new shared directory1:

$ export PYTHONPATH=$GROUP_HOME/python/lib/python2.7/site-packages:$PYTHONPATH
+

And now, the module should be visible:

$ pip list -v
+Package    Version Location                                                            Installer
+---------- ------- ------------------------------------------------------------------- ---------
+pip        18.1    /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip
+setuptools 28.8.0  /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip
+urllib3    1.24    /home/groups/ruthm/python/lib/python2.7/site-packages               pip
+virtualenv 15.1.0  /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip
+

$PYTHONPATH depends on the Python version

The $PYTHONPATH environment variable is dependent on the Python version you're using, so for Python 3.6, it should include $GROUP_HOME/python/lib/python3.6/site-packages

$PATH may also need to be updated

Some Python packages also install executable scripts. To make them easily accessible in your environment, you may also want to modify your $PATH to include their installation directory.

For instance, if you installed Python packages in $GROUP_HOME/python:

$ export PATH=$GROUP_HOME/python/bin:$PATH
+
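As noted in the footnote below, these settings can be made permanent by adding them to your ~/.profile. For instance (the heredoc delimiter is quoted so the variables are expanded at login time, not when writing the file):

$ cat >> ~/.profile << 'EOF'
+export PYTHONPATH=$GROUP_HOME/python/lib/python2.7/site-packages:$PYTHONPATH
+export PATH=$GROUP_HOME/python/bin:$PATH
+EOF
+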

Installing from GitHub#

pip also supports installing packages from a variety of sources, including GitHub repositories.

For instance, to install HTTPie, you can do:

$ pip install --user git+git://github.com/jkbr/httpie.git
+
Installing from a requirements file#

pip can install a list of packages from a file, which is pretty convenient for installing several dependencies at once.

In order to do this, create a text file called requirements.txt and place each package you would like to install on its own line:

numpy
+scikit-learn
+keras
+tensorflow
+

You can now install your modules like so:

$ ml python
+$ pip install --user -r requirements.txt
+
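Conversely, if you already have a working set of packages installed, pip freeze can generate a requirements file listing them, which is handy for reproducing an environment elsewhere:

$ pip freeze > requirements.txt
+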

Upgrading packages#

pip can update already installed packages with the following command:

$ pip install --user --upgrade <package_name>
+

Upgrading packages also works with requirements.txt files:

$ pip install --user --upgrade -r requirements.txt
+

Uninstalling packages#

To uninstall a Python package, you can use the pip uninstall command (note that it doesn't take any --user option):

$ pip uninstall <package_name>
+$ pip uninstall -r requirements.txt
+

Virtual environments#

🚧 Work in progress 🚧

This page is a work in progress and is not complete yet. We are actively working on adding more content and information.
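In the meantime, here's a minimal sketch of creating an isolated environment with Python 3's built-in venv module (the $SCRATCH/myenv location is just an example; inside the environment, pip installs packages locally, without --user):

$ ml python/3.6.1
+$ python3 -m venv $SCRATCH/myenv
+$ source $SCRATCH/myenv/bin/activate
+(myenv) $ pip install numpy
+(myenv) $ deactivate
+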


  1. This line can also be added to a user's ~/.profile file, for a more permanent setting. 

\ No newline at end of file diff --git a/docs/software/using/quantum-espresso/index.html b/docs/software/using/quantum-espresso/index.html new file mode 100644 index 000000000..dbcd75520 --- /dev/null +++ b/docs/software/using/quantum-espresso/index.html @@ -0,0 +1,73 @@ + Quantum Espresso - Sherlock

Quantum Espresso

Introduction#

Quantum ESPRESSO is an integrated suite of Open-Source computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudo-potentials.

Quantum ESPRESSO has evolved into a distribution of independent and inter-operable codes in the spirit of an open-source project. The Quantum ESPRESSO distribution consists of a “historical” core set of components, and a set of plug-ins that perform more advanced tasks, plus a number of third-party packages designed to be inter-operable with the core components. Researchers active in the field of electronic-structure calculations are encouraged to participate in the project by contributing their own codes or by implementing their own ideas into existing codes.

More documentation#

The following documentation is specifically intended for using Quantum Espresso on Sherlock. For more complete documentation about Quantum Espresso in general, please see the Quantum Espresso documentation.

Quantum Espresso on Sherlock#

To run Quantum Espresso on Sherlock, you can use one of the provided modules, or run it from a container.

The CPU version of Quantum Espresso can be loaded via the quantum-espresso module:

$ ml chemistry quantum-espresso
+

and the GPU version can be loaded via the quantum-espresso_gpu module:

$ ml chemistry quantum-espresso_gpu
+

Examples#

Here are a few examples showing how to run the AUSURF112 benchmark.

Preparation#

The first step is to get the benchmark files:

$ cd $SCRATCH
+$ git clone https://github.com/QEF/benchmarks qe_benchmarks
+$ cd qe_benchmarks/AUSURF112
+

CPU version#

To submit a Quantum Espresso job to run the AUSURF112 benchmark on CPU nodes, the following submission script can be used:

#!/bin/bash
+#SBATCH --nodes=2                # number of nodes for the job
+#SBATCH --ntasks-per-node=16     # number of tasks per node
+#SBATCH --time=00:30:00          # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+
+module reset
+module load chemistry
+module load quantum-espresso/7.0
+
+cd $SCRATCH/qe_benchmarks
+cd AUSURF112
+
+srun pw.x -input ausurf.in -npool 2
+

In this example, the job will request 32 CPU cores on 2 nodes, 30 minutes of run time, and will send an email notification when the job starts and when it ends.

The job can be submitted with:

$ sbatch qe-bench_cpu.sbatch
+

GPU version#

Native#

The GPU version can be loaded through the quantum-espresso_gpu module.

Using the same benchmark files as for the CPU version above, you can create a job submission script like this:

#!/bin/bash
+#SBATCH --partition=gpu          # partition to submit the job to
+#SBATCH --nodes=2                # number of nodes for the job
+#SBATCH --gpus-per-node=1        # number of GPUs per node
+#SBATCH --time=00:30:00          # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+
+module reset
+module load chemistry
+module load quantum-espresso_gpu/7.0
+
+cd $SCRATCH/qe_benchmarks
+cd AUSURF112
+
+srun pw.x -input ausurf.in -npool 2
+

In this example, the job will request 2 GPUs (1 per node on 2 nodes), 30 minutes of run time, and will send an email notification when the job starts and when it ends.

It can be submitted with:

$ sbatch qe-bench_gpu.sbatch
+

NGC container#

Another option to run a GPU version of Quantum Espresso is to use a 3rd-party container.

The NVIDIA GPU Cloud (NGC) hosts a Quantum Espresso container that could be used on Sherlock.

With Singularity#

To use the container with Singularity, first pull the Quantum Espresso container with:

$ cd $SCRATCH
+$ singularity pull docker://nvcr.io/hpc/quantum_espresso:qe-7.0
+

Then create the following script:

#!/bin/bash
+#SBATCH --partition=gpu          # partition to submit the job to
+#SBATCH --nodes=2                # number of nodes for the job
+#SBATCH --gpus-per-node=1        # number of GPUs per node
+#SBATCH --mem=32GB               # memory per node
+#SBATCH --time=00:30:00          # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+
+cd $SCRATCH/qe_benchmarks
+cd AUSURF112
+
+srun singularity run --nv \
+    $SCRATCH/quantum_espresso_qe-7.0.sif \
+    pw.x -input ausurf.in -npool 2
+

and submit it:

$ sbatch qe-bench_gpu_singularity.sbatch
+
With pyxis/enroot#

To use the container with pyxis/enroot, you can directly submit the following script (saved for instance as qe-bench_gpu_enroot.sbatch):

#!/bin/bash
+#SBATCH --partition=gpu          # partition to submit the job to
+#SBATCH --nodes=2                # number of nodes for the job
+#SBATCH --gpus-per-node=1        # number of GPUs per node
+#SBATCH --mem=32GB               # memory per node
+#SBATCH --time=00:30:00          # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+
+cd $SCRATCH/qe_benchmarks
+cd AUSURF112
+
+srun --container-image nvcr.io/hpc/quantum_espresso:qe-7.0 \
+     --container-workdir $PWD \
+     pw.x -input ausurf.in -npool 2
+

and submit it:

$ sbatch qe-bench_gpu_enroot.sbatch
+
\ No newline at end of file diff --git a/docs/software/using/rclone/index.html b/docs/software/using/rclone/index.html new file mode 100644 index 000000000..7b1bea9ee --- /dev/null +++ b/docs/software/using/rclone/index.html @@ -0,0 +1,134 @@ + Rclone - Sherlock

Rclone

Introduction#

If you need to sync files between cloud storage and Sherlock, rclone is a command line program that can help. You can easily use it to transfer files from a cloud storage provider to Sherlock or Oak, or vice versa. The following tutorial walks through transferring files between Google Drive and Oak storage.

More documentation#

For more information on running rclone, please see the official documentation.

Setup#

rclone config#

Before transferring data for the first time, you will need to configure rclone so that it can access your Google Drive. This will require use of your browser, so you will need to connect to Sherlock with local port forwarding (ssh -L). You only need to do this when you are configuring rclone for the first time.

Use local terminal for rclone config

This method will not work in the Sherlock OnDemand shell. You will need to use your local machine's terminal to enable local port forwarding and to allow rclone to communicate with your browser. On Linux and macOS, you can use the Terminal app; on Windows, you can use the PowerShell app.

When running rclone config you will be prompted to enter names and values, indicated by the > symbol. To leave it empty, press Enter.

# Connect to Sherlock with local port forwarding
+$ ssh -L localhost:53682:localhost:53682 <SUNetID>@login.sherlock.stanford.edu
+
+
+# Load the rclone module
+$ ml system rclone
+
+
+# Run the rclone configuration tool
+$ rclone config
+
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+
+Enter name for new remote.
+name> gdrive
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+ 1 / 1Fichier
+   \ (fichier)
+ 2 / Akamai NetStorage
+   \ (netstorage)
+       ...
+18 / Google Drive
+   \ (drive)
+       ...
+48 / premiumize.me
+   \ (premiumizeme)
+49 / seafile
+   \ (seafile)
+Storage> drive
+
+Option client_id.
+Google Application Client Id
+...
+Enter a value. Press Enter to leave empty.
+client_id>
+
+Option client_secret.
+OAuth Client Secret.
+Leave blank normally.
+Enter a value. Press Enter to leave empty.
+client_secret>
+
+Option scope.
+Scope that rclone should use when requesting access from drive.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ 1 / Full access all files, excluding Application Data Folder.
+   \ (drive)
+...
+scope> 1
+
+Option service_account_file.
+Service Account Credentials JSON file path.
+Leave blank normally.
+...
+Enter a value. Press Enter to leave empty.
+service_account_file>
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> n
+
+Use auto config?
+ * Say Y if not sure
+ * Say N if you are working on a remote or headless machine
+
+y) Yes (default)
+n) No
+y/n> y
+
+2023/09/12 10:51:55 NOTICE: If your browser doesn't open automatically go to the
+following link: http://127.0.0.1:53682/auth?state=#################
+2023/09/12 10:51:55 NOTICE: Log in and authorize rclone for access
+2023/09/12 10:51:55 NOTICE: Waiting for code...
+

At this point, you can copy and paste the provided link into your browser. You will be asked to confirm that you want to allow rclone to access your files. Once you have successfully done so, you can complete the configuration in the terminal.

Configure this as a Shared Drive (Team Drive)?
+
+y) Yes
+n) No (default)
+y/n> n
+
+Configuration complete.
+Options:
+...
+Keep this "gdrive" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+
+Current remotes:
+
+Name                 Type
+====                 ====
+gdrive               drive
+
+e) Edit existing remote
+n) New remote
+d) Delete remote
+r) Rename remote
+c) Copy remote
+s) Set configuration password
+q) Quit config
+e/n/d/r/c/s/q> q
+

Examples#

rclone copy#

To transfer data between cloud storage and Sherlock or Oak, you can use the rclone copy command.

# Start an interactive dev session
+$ sh_dev
+
+# Load the rclone module
+$ ml system rclone
+
+# Copy a folder from Google Drive to Oak
+$ rclone copy gdrive:<folder name> /oak/stanford/groups/<group_name>/<folder name>
+
+# Copy a single file from Oak to Google Drive
+$ rclone copy /oak/stanford/groups/<group name>/<file name> gdrive:
+

rclone ls/lsd#

To view the files and folders in your cloud storage, you can use the rclone ls and rclone lsd commands, respectively.

# Load the rclone module
+$ ml system rclone
+
+# List all top-level directories in Google Drive
+$ rclone lsd gdrive: --max-depth 1
+
+# List all files in a directory
+$ rclone ls gdrive:<folder name>
+
+# List all files on Google Drive (including those in folders)
+$ rclone ls gdrive:
+
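rclone sync#

To make a destination directory identical to a source, you can use the rclone sync command. Be careful: unlike copy, sync can delete files on the destination to match the source, so it's a good idea to preview the operation with the --dry-run flag first.

# Preview the changes without applying them
+$ rclone sync --dry-run gdrive:<folder name> $SCRATCH/<folder name>
+
+# Make $SCRATCH/<folder name> identical to the Google Drive folder
+$ rclone sync gdrive:<folder name> $SCRATCH/<folder name>
+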
\ No newline at end of file diff --git a/docs/software/using/schrodinger/index.html b/docs/software/using/schrodinger/index.html new file mode 100644 index 000000000..ca717e8dd --- /dev/null +++ b/docs/software/using/schrodinger/index.html @@ -0,0 +1,40 @@ + Schrödinger - Sherlock

Schrödinger

Introduction#

The Schrödinger suite is a commercial, licensed software package used to simulate and model molecular behavior at the atomic level. The Schrödinger software tools include molecular dynamics simulations, quantum mechanics calculations, virtual screening and visualization tools.

More documentation#

The following documentation is specifically intended for using Schrödinger on Sherlock. For more complete documentation about Schrödinger in general, please contact Schrödinger support.

Schrödinger on Sherlock#

Licensing#

Stanford Libraries have purchased a site license for the Schrödinger suite. Please contact Stanford Libraries at sciencelibrary@stanford.edu and CC srcc-support@stanford.edu if you would like to access Schrödinger on Sherlock: after we receive confirmation, your PI group will be granted access on Sherlock.

Using Schrödinger#

You can use Schrödinger software after having loaded the corresponding software module with the module command. To load the current default version:

module load chemistry schrodinger
+

To see all the available versions, you can use the module spider command:

$ module spider schrodinger
+

Once loaded, the $SCHRODINGER environment variable is automatically set to allow all Schrödinger commands to run. For example, to run the jaguar command:

$ jaguar run -WAIT H2O.in
+

To call the basic Schrödinger run command, just enter:

$ run
+

or glide:

$ glide
+usage: glide_startup.py [options] <input_file>
+glide_startup.py: error: the following arguments are required: input_file
+

Maestro GUI#

OnDemand shell sessions

Opening an X11/GUI session will not work in a Sherlock OnDemand terminal session. You will need to use the method mentioned below, i.e. a standard terminal session with an X11 client.

To launch the Maestro GUI, once you have loaded the Schrödinger module, simply run:

$ maestro
+

You'll need to enable X11 forwarding in your initial connection to Sherlock, and request it as well for your job allocation.

Here are some example commands you can run:

# on your local machine
+$ ssh -X login.sherlock.stanford.edu
+
+# then from a Sherlock login node
+$ sh_dev -m 16GB
+
+# and finally on the allocated compute node:
+$ ml load chemistry schrodinger
+$ maestro
+

This will launch Maestro on a compute node and display its graphical user interface on your local machine's display.

GUI performance

Please note that running graphical user interfaces (GUIs) over the network via X11 over SSH may not necessarily yield the best performance. Graphical analysis is often best done on a local machine, while intensive, batch-scheduled computations are carried out on the cluster.

For more information about X11 forwarding, you can refer to this page.

Examples#

batch job submission#

Here's an example batch script, requesting 1 CPU, for 10 minutes on the normal partition, that can be saved as water.sbatch:

#!/usr/bin/bash
+#SBATCH -o water.%j.out
+#SBATCH -e water.%j.err
+#SBATCH -n 1
+#SBATCH -t 10:00
+#SBATCH -p normal
+
+# Load required modules
+module load chemistry schrodinger
+
+# Run Schrödinger, -WAIT is often required
+jaguar run -WAIT H2O.in
+

Save this input file as H2O.in:

&gen
+&
+&echo
+&
+&zmat
+O       0.0000000000000   0.0000000000000  -0.1135016000000
+H1      0.0000000000000   0.7531080000000   0.4540064000000
+H2      0.0000000000000  -0.7531080000000   0.4540064000000
+&
+

And you can submit the batch script with:

$ sbatch water.sbatch
+

After execution, you should find an H2O.out output file in the current directory, as well as a log file (H2O.log). If you don't, you can check for errors in the job output and error files: water.<jobid>.{out,err}.

\ No newline at end of file diff --git a/docs/software/using/spark/index.html b/docs/software/using/spark/index.html new file mode 100644 index 000000000..ebd45213f --- /dev/null +++ b/docs/software/using/spark/index.html @@ -0,0 +1,83 @@ + Spark - Sherlock

Spark

Introduction#

Apache Spark™ is a general engine for large-scale data processing. This document gives a quick introduction to getting a first test program running in Spark on Sherlock.

More documentation#

The following documentation is specifically intended for using Spark on Sherlock. For more complete documentation about Spark in general, please see the Apache Spark documentation.

Spark on Sherlock#

Running Apache Spark on Sherlock is a bit different from using a traditional Spark/Hadoop cluster, in that it requires some level of integration with the scheduler. In a sense, the computing resources (memory and CPU) need to be allocated twice: first, sufficient resources for the Spark application need to be allocated via Slurm; and second, spark-submit resource allocation flags need to be properly specified.

In order to use Spark, three steps have to be kept in mind when submitting a job to the queuing system:

  1. a new Spark cluster has to be started on the allocated nodes
  2. once the Spark cluster is up and running, Spark jobs have to be submitted to the cluster
  3. after all Spark jobs have finished running, the cluster has to be shut down

The following scripts show how to implement these three steps, and use the Pi Monte-Carlo calculation as an example.

Single-node job#

In this example, all the Spark processes run on the same compute node, which makes for a fairly simple sbatch script. The following example will start an 8-core job on a single node, and run a Spark task within that allocation:

#!/bin/bash
+
+#SBATCH --job-name=spark_singlenode
+#SBATCH --nodes=1
+#SBATCH --cpus-per-task=8
+#SBATCH --time=10
+
+module load spark
+
+# This syntax tells spark to use all cpu cores on the node.
+export MASTER="local[*]"
+
+# This is a Scala example
+run-example SparkPi 1000
+
+# This is a Python example.
+spark-submit --master $MASTER $SPARK_HOME/examples/src/main/python/pi.py 1000
+
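Assuming the script above was saved as spark_singlenode.sbatch (the name is arbitrary), it can be submitted with:

$ sbatch spark_singlenode.sbatch
+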

Multi-node job#

To start a Spark cluster and run a task on multiple nodes, more preliminary steps are necessary. Here's an example script that will span 2 nodes, start 2 Spark workers on each node, and allow each worker to use 8 cores:

#!/bin/bash
+#SBATCH --nodes=2
+#SBATCH --mem-per-cpu=4G
+#SBATCH --cpus-per-task=8
+#SBATCH --ntasks-per-node=2
+#SBATCH --output=sparkjob-%j.out
+
+## --------------------------------------
+## 0. Preparation
+## --------------------------------------
+
+# load the Spark module
+module load spark
+
+# identify the Spark cluster with the Slurm jobid
+export SPARK_IDENT_STRING=$SLURM_JOBID
+
+# prepare directories
+export SPARK_WORKER_DIR=${SPARK_WORKER_DIR:-$HOME/.spark/worker}
+export SPARK_LOG_DIR=${SPARK_LOG_DIR:-$HOME/.spark/logs}
+export SPARK_LOCAL_DIRS=${SPARK_LOCAL_DIRS:-/tmp/spark}
+mkdir -p $SPARK_LOG_DIR $SPARK_WORKER_DIR
+
+## --------------------------------------
+## 1. Start the Spark cluster master
+## --------------------------------------
+
+start-master.sh
+sleep 1
+MASTER_URL=$(grep -Po '(?=spark://).*' \
+             $SPARK_LOG_DIR/spark-${SPARK_IDENT_STRING}-org.*master*.out)
+
+## --------------------------------------
+## 2. Start the Spark cluster workers
+## --------------------------------------
+
+# get the resource details from the Slurm job
+export SPARK_WORKER_CORES=${SLURM_CPUS_PER_TASK:-1}
+export SPARK_MEM=$(( ${SLURM_MEM_PER_CPU:-4096} * ${SLURM_CPUS_PER_TASK:-1} ))M
+export SPARK_DAEMON_MEMORY=$SPARK_MEM
+export SPARK_WORKER_MEMORY=$SPARK_MEM
+export SPARK_EXECUTOR_MEMORY=$SPARK_MEM
+
+# start the workers on each node allocated to the job
+export SPARK_NO_DAEMONIZE=1
+srun  --output=$SPARK_LOG_DIR/spark-%j-workers.out --label \
+      start-slave.sh ${MASTER_URL} &
+
+## --------------------------------------
+## 3. Submit a task to the Spark cluster
+## --------------------------------------
+
+spark-submit --master ${MASTER_URL} \
+             --total-executor-cores $((SLURM_NTASKS * SLURM_CPUS_PER_TASK)) \
+             $SPARK_HOME/examples/src/main/python/pi.py 10000
+
+## --------------------------------------
+## 4. Clean up
+## --------------------------------------
+
+# stop the workers
+scancel ${SLURM_JOBID}.0
+
+# stop the master
+stop-master.sh
+
\ No newline at end of file diff --git a/docs/storage/data-protection/index.html b/docs/storage/data-protection/index.html new file mode 100644 index 000000000..1e9316e56 --- /dev/null +++ b/docs/storage/data-protection/index.html @@ -0,0 +1,20 @@ + Data protection - Sherlock

Data protection

Data protection is mostly a task for the user

Except for $HOME and $GROUP_HOME, data on Sherlock is not backed up, nor archived. It's up to each user and group to make sure they maintain multiple copies of their data if needed.

Snapshots#

File system snapshots represent the state of the file system at a particular point in time. They allow accessing the file system contents as it was at different times in the past, and getting back data that may have been deleted or modified since the snapshot was taken.

Important

Snapshots are only available on $HOME and $GROUP_HOME.

Accessing snapshots#

Snapshots taken in $HOME and $GROUP_HOME are accessible in a .snapshot directory at any level of the hierarchy. Those .snapshot directories don't appear when listing directory contents with ls, but they can be listed explicitly or accessed with cd:

$ cd $HOME
+$ ls -ald .snapshot/users*
+[...]
+drwx------ 118 sunetid group  6680 Jul 21 11:16 .snapshot/users.daily.20170721
+drwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.daily.20170722
+drwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.daily.20170723
+drwx------ 118 sunetid group  6702 Jul 24 10:57 .snapshot/users.daily.20170724
+drwx------ 118 sunetid group  6702 Jul 24 10:57 .snapshot/users.daily.latest
+drwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-16:00
+drwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-17:00
+drwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-18:00
+drwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-19:00
+drwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-20:00
+[...]
+$ cd .snapshot/users.daily.latest
+

For instance:

  • the $HOME/.snapshot/users.daily.latest directory is the latest daily snapshot available, and stores the contents of the $HOME directory as they were when the last daily snapshot was taken,
  • the $HOME/foo/.snapshot/users.hourly.20170722-18:00 directory can be used to retrieve the contents of the $HOME/foo directory as it was at 6pm on July 22nd, 2017.

Restoring from a snapshot#

If you deleted a file or modified it and want to restore an earlier version, you can simply copy the file from its saved version in the appropriate snapshot.

Examples:

  • to restore the last known version of $HOME/foo/bar:

    $ cp $HOME/foo/.snapshot/users.hourly.latest/bar $HOME/foo/bar
    +

    or

    $ cp $HOME/.snapshot/users.hourly.latest/foo/bar $HOME/foo/bar
    +

    (both commands are equivalent)

  • to restore your ~/.bashrc file from 2 days ago:

    $ SNAP_DATE=$(date +%Y%m%d -d "2 days ago")
    +$ cp $HOME/.snapshot/users.daily.${SNAP_DATE}/.bashrc $HOME/.bashrc
    +

Snapshot policy#

The current1 policy is to take snapshots on an hourly, daily and weekly basis. Older snapshots automatically expire after their retention period. The snapshot policy applies to both $HOME and $GROUP_HOME storage spaces.

Snapshot frequency | Retention period | Number of snapshots
hourly             | 2 days           | 48
daily              | 1 week           | 7
weekly             | 1 month          | 4

The shortest interval between snapshots is an hour. That means that if you create a file and then delete it within the hour, it won't appear in snapshots, and you won't be able to restore it.

If a file exists for more than an hour, and is then deleted, it will be present in the hourly snapshots for the next 48 hours, and you'll be able to retrieve it during that period. Similarly, if a file exists for more than a day, it could be restored for up to 7 days.

Snapshots don't count towards your quota.

Snapshots, as well as the entire filesystem, are replicated to an off-site system, to ensure that data can be retrieved even in the case of a catastrophic failure of the whole system or a datacenter-level disaster.

Backups#

Although Stanford Research Computing doesn't offer any backup service per se, we do provide all the tools required to transfer data in and out of Sherlock.

Suggested options to back up your data include cloud storage providers (such as AWS, Google Drive, Dropbox or Box), which can all be reached with the data transfer tools provided on Sherlock.


  1. The snapshot policy is subject to change and may be adjusted as the storage system usage conditions evolve. 

\ No newline at end of file diff --git a/docs/storage/data-sharing/index.html b/docs/storage/data-sharing/index.html new file mode 100644 index 000000000..6d053c119 --- /dev/null +++ b/docs/storage/data-sharing/index.html @@ -0,0 +1,47 @@ + Data sharing - Sherlock

Data sharing

The following sections present and detail options to share data across users and groups on Sherlock.

Sharing data locally on Sherlock#

Traditional Unix permissions#

Standard Unix file permissions are supported on Sherlock and provide read, write and execute permissions for the three distinct access classes.

The access classes are defined as follows:

  • Files and directories are owned by a user. The owner determines the file's user class. Distinct permissions apply to the owner.
  • Files and directories are assigned a group, which defines the file's group class. Distinct permissions apply to members of the file's group. The owner may be a member of the file's group.
  • Users who are not the owner, nor a member of the group, comprise a file's others class. Distinct permissions apply to others.

The following permissions apply to each class:

  • The read permission grants the ability to read a file. When set for a directory, this permission grants the ability to read the names of files in the directory, but not to find out any further information about them such as contents, file type, size, ownership, permissions.
  • The write permission grants the ability to modify a file. When set for a directory, this permission grants the ability to modify entries in the directory. This includes creating files, deleting files, and renaming files.
  • The execute permission grants the ability to execute a file. This permission must be set for executable programs, including shell scripts, in order to allow the operating system to run them. When set for a directory, this permission grants the ability to access file contents and meta-information if its name is known, but not list files inside the directory, unless read is set also.

Shared directories traversal

If you need to give access to one of your files to another user, they will at least need execute permission on each directory within the path to that file.

The effective permissions are determined based on the first class the user falls within in the order of user, group then others. For example, the user who is the owner of the file will have the permissions given to the user class regardless of the permissions assigned to the group class or others class.

While traditional Unix permissions are sufficient in most cases to share files with all the users within the same group, they are not enough to share files with a specific subset of users, or with users from other groups. Access Control Lists (ACLs) can be used for that purpose.

There are two types of ACLs supported on Sherlock, depending on the underlying filesystem:

Type       | Filesystems
NFSv4 ACLs | $HOME and $GROUP_HOME
POSIX ACLs | $SCRATCH, $GROUP_SCRATCH, $L_SCRATCH and $OAK

POSIX ACLs#

POSIX ACLs allow you to grant or deny access to files and directories for different users (or groups), independently of the file's owner or group.

Two types of POSIX ACLs can be defined:

  • Access ACLs: grant permission for a specific file or directory.
  • Default ACLs: define a default set of ACLs that will be applied to any file or directory without an already defined ACL. They can only be set on directories.

ACLs are set with the setfacl command, and displayed with getfacl. For more details and examples, please refer to this documentation.

In the example below, we allow two users to access a restricted directory located at $GROUP_SCRATCH/restricted-dir/:

$ cd $GROUP_SCRATCH
+
+### Create new directory
+$ mkdir restricted-dir
+
+### Remove 'group' and 'other' access
+$ chmod g-rwx,o-rwx restricted-dir
+
+### Give user bob read and traversal permissions to the directory
+$ setfacl -m u:bob:rX restricted-dir
+
+### Use default ACLs (-d) to give user bob read access to all new
+### files and sub-directories that will be created in "restricted-dir"
+$ setfacl -d -m u:bob:rX restricted-dir
+
+### Give user alice read, write and traversal permissions for the directory
+$ setfacl -m u:alice:rwX restricted-dir
+
+### Use default ACLs (-d) to give user alice read and write access to all
+### new files and sub-directories
+$ setfacl -d -m u:alice:rwX restricted-dir
+
+### Show ACLs
+$ getfacl restricted-dir
+# file: restricted-dir/
+# owner: joe
+# group: grp
+# flags: -s-
+user::rwx
+user:bob:r-x
+group::---
+mask::r-x
+other::---
+default:user::rwx
+default:user:alice:rwx
+default:user:bob:r-x
+default:group::---
+default:mask::rwx
+default:other::---
+
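To revoke access later, individual entries can be removed with setfacl -x, or all extended ACL entries at once with setfacl -b. For instance:

### Remove bob's entry from the access ACL
+$ setfacl -x u:bob restricted-dir
+
+### Remove bob's entry from the default ACL as well
+$ setfacl -d -x u:bob restricted-dir
+
+### Or remove all ACL entries from the directory at once
+$ setfacl -b restricted-dir
+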

Default permissions on $GROUP_SCRATCH

By default, the Unix permissions on the root directory $GROUP_SCRATCH don't allow read or traversal access for others (ie. any user not part of your PI group). If you need to share files with users outside of your own group, please contact us so we can set the appropriate permissions on your folder.

For $SCRATCH, you're the owner of the directory and so you can change the permissions yourself.

NFSv4 ACLs#

$HOME and $GROUP_HOME also allow setting ACLs, albeit with different syntax and semantics than POSIX ACLs. The principle is very similar, though.

An ACL in NFSv4 is a list of rules setting permissions on files or directories. A permission rule, or Access Control Entry (ACE), is of the form type:flags:principal:permissions.

Commonly used entries for these fields are:

  • type: A (allow) or D (deny)
  • flags: g (group), d (directory-inherit), f (file-inherit), n (no-propagate-inherit), or i (inherit-only)
  • principal: a named user (user@sherlock), a group, or one of three special principals: OWNER@, GROUP@, and EVERYONE@.
  • permissions: there are 14 permission characters, as well as the shortcuts R, W, and X. Here is a list of possible permissions that can be included in the permissions field (options are case-sensitive):
  • r read-data (files) / list-directory (directories)
  • w write-data (files) / create-file (directories)
  • x execute (files) / change-directory (directories)
  • a append-data (files) / create-subdirectory (directories)
  • t read-attributes: read the attributes of the file/directory.
  • T write-attributes: write the attributes of the file/directory.
  • n read-named-attributes: read the named attributes of the file/directory.
  • N write-named-attributes: write the named attributes of the file/directory.
  • c read-ACL: read the file/directory NFSv4 ACL.
  • C write-ACL: write the file/directory NFSv4 ACL.
  • o write-owner: change ownership of the file/directory.
  • y synchronize: allow clients to use synchronous I/O with the server.
  • d delete: delete the file/directory. Some servers will allow a delete to occur if either this permission is set in the file/directory or if the delete-child permission is set in its parent directory.
  • D delete-child: remove a file or subdirectory from within the given directory (directories only)

A comprehensive listing of allowable field strings is given in the manual page nfs4_acl(5).

To see what permissions are set on a particular file, use the nfs4_getfacl command. For example, a newly created file1 may have default permissions listed by ls -l as -rw-r--r--. Listing the permissions with nfs4_getfacl would display the following:

$ nfs4_getfacl file1
+A::OWNER@:rwatTnNcCoy
+A:g:GROUP@:rtncy
+A::EVERYONE@:rtncy
+

To set permissions on a file, use the nfs4_setfacl command. For convenience, NFSv4 provides the shortcuts R, W and X for setting read, write, and execute permissions. For example, to add write permissions for the current group on file1, use nfs4_setfacl with the -a switch:

$ nfs4_setfacl -a A::GROUP@:W file1
+

This command switches the GROUP@ permission field from rtncy to rwatTnNcCoy. However, be aware that NFSv4 file permission shortcuts have different meanings than the traditional Unix r, w, and x. For example, issuing chmod g+w file1 will set GROUP@ to rwatncy.

Although the shortcut permissions can be handy, often rules need to be more customized. Use nfs4_setfacl -e file1 to open the ACL for file1 in a text editor.

Access Control Entries allow more fine-grained control over file and directory permissions than the chmod command. For example, if user joe wants to give read, write and traverse permissions to jack for his directory private, he would issue:

$ nfs4_setfacl -R -a A::jack@sherlock:RWX private/
+

The -R switch recursively applies the rule to the files and directories within private/ as well.

To allow jack to create files and subdirectories within private/ with the permissions as granted above, inheritance rules need to be applied.

$ nfs4_setfacl -R -a A:fd:jack@sherlock:RWX private/
+
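Conversely, a previously granted entry can be removed by passing the same ACE to the -x switch (this assumes the specification matches the stored entry exactly):

$ nfs4_setfacl -R -x A:fd:jack@sherlock:RWX private/
+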

By default, each permission is in the Deny state, and an ACE is required to explicitly allow a permission. However, be aware that a server may silently override a user's ACE, usually to a less permissive setting.

For complete documentation and examples on using NFSv4 ACLs, please see the manual page at nfs4_acl(5).

Default permissions on $GROUP_HOME

By default, the Unix permissions on the root directory $GROUP_HOME don't allow read or traversal access for others (ie. any user not part of your PI group). If you need to share files with users outside of your own group, please contact us so we can set the appropriate permissions on your folder.

For $HOME, you're the owner of the directory and so you can change the permissions yourself.

Sharing data outside of Sherlock#

If you'd like to share data stored on Sherlock with external collaborators, there are two possibilities:

  1. sponsor a SUNet ID1 for these collaborators, and contact us to create an account for them on Sherlock. This will grant them access to your resources on Sherlock (compute as well as storage) and give them access to your group's shared files, like any other user in your group.

  2. if you don't want to grant full access to your Sherlock resources to your external collaborators, you can use the Globus data sharing feature. This won't require your collaborators to get Stanford accounts, and will allow easy sharing of the datasets of your choice.

    Globus Sharing is only available through the Oak endpoint

    Globus Sharing is only available on $OAK, using the Oak Globus Endpoint 2 (srcc#oak).

    For complete details about sharing data with Globus, please see the Globus documentation at https://docs.globus.org/how-to/share-files/


  1. a base-level SUNet ID (free) is sufficient to get an account on Sherlock. For more details about SUNet ID levels and associated services, please see the Stanford UIT SUNet IDs page

  2. SUNet ID required 

\ No newline at end of file diff --git a/docs/storage/data-transfer/index.html b/docs/storage/data-transfer/index.html new file mode 100644 index 000000000..ef99ca6b8 --- /dev/null +++ b/docs/storage/data-transfer/index.html @@ -0,0 +1,69 @@ + Data transfer - Sherlock

Data transfer

Transfer protocols#

A number of methods allow transferring data in/out of Sherlock. For most cases, we recommend using SSH-based file transfer commands, such as scp, sftp, or rsync. They will provide the best performance for data transfers from and to campus.

For large transfers, using DTNs is recommended

Most casual data transfers can be done through the login nodes, by pointing your transfer tool at login.sherlock.stanford.edu. But because of resource limits on the login nodes, larger transfers may not work as expected.

For transferring large amounts of data, Sherlock features a specific Data Transfer Node, with dedicated bandwidth, as well as a managed Globus endpoint, that can be used for scheduled, unattended data transfers.

We also provide tools on Sherlock to transfer data to various Cloud providers, such as AWS, Google Drive, Dropbox, Box, etc.

Prerequisites#

Most of the commands detailed below require a terminal and an SSH client1 on your local machine to launch commands.

You'll need to start a terminal and type the given example commands at the prompt, omitting the initial $ character (it just indicates a command prompt, and then should not be typed in).

Host keys#

Upon your very first connection to Sherlock, you will be greeted by a warning such as:

The authenticity of host 'login.sherlock.stanford.edu' can't be established.
+ECDSA key fingerprint is SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg.
+Are you sure you want to continue connecting (yes/no)?
+

The same warning will be displayed if you try to connect to one of the Data Transfer Nodes (DTNs):

The authenticity of host 'dtn.sherlock.stanford.edu' can't be established.
+ECDSA key fingerprint is SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg.
+Are you sure you want to continue connecting (yes/no)?
+

This warning is normal: your SSH client warns you that it is the first time it sees that new computer. To make sure you are actually connecting to the right machine, you should compare the ECDSA key fingerprint shown in the message with one of the fingerprints below:

Key type | Key fingerprint
RSA      | SHA256:T1q1Tbq8k5XBD5PIxvlCfTxNMi1ORWwKNRPeZPXUfJA
         | legacy format: f5:8f:01:46:d1:f9:66:5d:33:58:b4:82:d8:4a:34:41
ECDSA    | SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg
         | legacy format: 70:4c:76:ea:ae:b2:0f:81:4b:9c:c6:5a:52:4c:7f:64

If they match, you can proceed and type 'yes'. Your SSH program will then store that key and will verify it for every subsequent SSH connection, to make sure that the server you're connecting to is indeed Sherlock.
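You can display the fingerprint of the key your SSH client has stored for Sherlock at any time, by querying your known_hosts file on your local machine:

$ ssh-keygen -lF login.sherlock.stanford.edu
+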

Host keys warning#

If you've connected to Sherlock 1.0 before, there's a good chance the Sherlock 1.0 keys were stored by your local SSH client. In that case, when connecting to Sherlock 2.0 using the sherlock.stanford.edu alias, you will be presented with the following message:

@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ WARNING: POSSIBLE DNS SPOOFING DETECTED! @
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+The RSA host key for sherlock.stanford.edu has changed, and the key for
+the corresponding IP address 171.66.97.101 is unknown. This could
+either mean that DNS SPOOFING is happening or the IP address for the
+host and its host key have changed at the same time.
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
+Someone could be eavesdropping on you right now (man-in-the-middle
+attack)!  It is also possible that a host key has just been changed.
+The fingerprint for the RSA key sent by the remote host is
+SHA256:T1q1Tbq8k5XBD5PIxvlCfTxNMi1ORWwKNRPeZPXUfJA.
+Please contact your system administrator.
+

You can just check that the SHA256 key listed in that warning message correctly matches the one listed in the table above, and if that's the case, you can safely remove the sherlock.stanford.edu entry from your ~/.ssh/known_hosts file with the following command on your local machine:

$ ssh-keygen -R sherlock.stanford.edu
+

and then connect again. You'll see the first-connection prompt mentioned above, and your SSH client will store the new keys for future connections.

SSH-based protocols#

User name

In all the examples below, you'll need to replace <sunetid> by your actual SUNet ID. If you happen to use the same login name on your local machine, you can omit it.

SCP (Secure Copy)#

The easiest command to use to transfer files to/from Sherlock is scp. It works like the cp command, except it can work over the network to copy files from one computer to another, using the secure SSH protocol.

The general syntax to copy a file to a remote server is:

$ scp <source_file_path> <username>@<remote_host>:<destination_path>
+

For instance, the following command will copy the file named foo from your local machine to your home directory on Sherlock:

$ scp foo <sunetid>@login.sherlock.stanford.edu:
+
Note the : character, which separates the hostname from the destination path. Here, the destination path is empty, which will instruct scp to copy the file to your home directory.

You can copy foo under a different name, or to another directory, with the following commands:

$ scp foo <sunetid>@login.sherlock.stanford.edu:bar
+$ scp foo <sunetid>@login.sherlock.stanford.edu:~/subdir/baz
+

To copy back files from Sherlock to your local machine, you just need to reverse the order of the arguments:

$ scp <sunetid>@login.sherlock.stanford.edu:foo local_foo
+

And finally, scp also supports recursive copying of directories, with the -r option:

$ scp -r dir/ <sunetid>@login.sherlock.stanford.edu:dir/
+
This will copy the dir/ directory and all of its contents to your home directory on Sherlock.

SFTP (Secure File Transfer Protocol)#

SFTP clients are interactive file transfer programs, similar to FTP, which perform all operations over an encrypted transport.

A variety of graphical SFTP clients are available for different OSes, such as WinSCP (Windows), Fetch2 (macOS) or Cyberduck (Windows and macOS).

When setting up your connection to Sherlock in the above programs, use the following information:

Hostname: login.sherlock.stanford.edu
+Port:     22
+Username: SUNet ID
+Password: SUNet ID password
+

OpenSSH also provides a command-line SFTP client, simply named sftp.

To log in to Sherlock:

$ sftp <sunetid>@login.sherlock.stanford.edu
+Connected to login.sherlock.stanford.edu.
+sftp>
+
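
Once at the sftp> prompt, you can use commands such as put to upload, get to download, and ls or cd to navigate. For instance, a quick sketch of a session (file names are hypothetical):

sftp> put foo
+sftp> get results/output.log
+sftp> bye
+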
For more details and examples about using the command-line SFTP client, you can refer to this tutorial.

rsync#

If you have complex hierarchies of files to transfer, or if you need to synchronize a set of files and directories between your local machine and Sherlock, rsync will be the best tool for the job. It efficiently transfers and synchronizes files across systems by checking their timestamps and sizes, which means it won't re-transfer files that have not changed since the last transfer, and will complete faster.

For instance, to transfer the whole ~/data/ folder tree from your local machine to your home directory on Sherlock, you can use the following command:

$ rsync -a ~/data/ <sunetid>@login.sherlock.stanford.edu:data/
+
Note the trailing slash (/) at the end of the directory names, which is important to instruct rsync to synchronize the contents of the whole directories.

To get more information about the transfer rate and follow its progress, you can use additional options:

$ rsync -avP ~/data/ <sunetid>@login.sherlock.stanford.edu:data/
+sending incremental file list
+./
+file1
+      1,755,049 100%    2.01MB/s    0:00:00 (xfr#2, to-chk=226/240)
+file2
+      2,543,699 100%    2.48MB/s    0:00:00 (xfr#3, to-chk=225/240)
+file3
+     34,930,688  19%   72.62MB/s    0:00:08
+
+[...]
+
For more details and examples about using rsync, you can refer to this tutorial.

SSHFS#

Sometimes, moving files in and out of the cluster, and maintaining two copies of each of the files you work on, both on your local machine and on Sherlock, may be painful. Fortunately, Sherlock offers the ability to mount any of its filesystems to your local machine, using a secure and encrypted connection.

With SSHFS, a FUSE-based filesystem implementation used to mount remote SSH-accessible filesystems, you can access your files on Sherlock as if they were locally stored on your own computer.

This comes in particularly handy when you need to access those files from an application that is not available on Sherlock, but that you already use or can install on your local machine: a data processing program that you have licensed for your own computer but can't use on Sherlock, a specific text editor that only runs on macOS, or any data-intensive 3D rendering software that wouldn't work comfortably enough over a forwarded X11 connection.

SSHFS is available for Linux, macOS, and Windows.

SSHFS on macOS

SSHFS on macOS is known to try to automatically reconnect filesystem mounts after resuming from sleep or suspend, even without any valid credentials. As a result, it will generate a lot of failed connection attempts and likely get your IP address blacklisted on login nodes.

Make sure to unmount your SSHFS drives before putting your macOS system to sleep to avoid this situation.

The following option could also be useful to avoid some permission issues: -o defer_permissions
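
For instance, a possible mount command using that option could look like this (the mount point name is arbitrary):

$ sshfs -o defer_permissions <sunetid>@dtn.sherlock.stanford.edu:./ ~/sherlock_home
+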

For instance, on a Linux machine with SSHFS installed, you could mount your Sherlock home directory via a Sherlock DTN with the following commands:

$ mkdir ~/sherlock_home
+$ sshfs <sunetid>@dtn.sherlock.stanford.edu:./ ~/sherlock_home
+

Using DTNs for data transfer

Using the Sherlock DTNs instead of the login nodes will ensure optimal performance for data transfers. Login nodes only have limited resources, which could restrict data transfer rates or cause disconnections during long data transfers.

And to unmount it:

$ umount ~/sherlock_home
+

On Windows, once SSHFS is installed, you can mount the $SCRATCH filesystem as a network drive through the Windows File Explorer. To do this, go to "This PC", right-click in the "Network Locations" section of the window and select "Add a Network Drive". Then, in the "Add Network Location Wizard", use the following network address:

\\sshfs\<sunetid>@dtn.sherlock.stanford.edu
+

This will mount the $SCRATCH partition as a network drive on your PC.

For more information about using SSHFS on your local machine, you can refer to this tutorial for more details and examples.

Globus#

Globus improves SSH-based file transfer protocols by providing the following features:

  • automates large data transfers,
  • handles transient errors, and can resume failed transfers,
  • simplifies the implementation of high-performance transfers between computing centers.

Globus is a Software as a Service (SaaS) system that provides end-users with a browser interface to initiate data transfers between endpoints. Globus allows users to "drag and drop" files from one endpoint to another. Endpoints are terminals for data; they can be laptops or supercomputers, and anything in between. The Globus web service negotiates, monitors, and optimizes transfers through firewalls and across network address translation (NAT). Under certain circumstances, with high-performance hardware, transfer rates exceeding 1 GB/s are possible. For more information about Globus, please see the Globus documentation.

Authentication#

To use Globus, you will first need to authenticate at Globus.org. You can either sign up for a Globus account, or use your SUNet ID account for authentication to Globus (which will be required to authenticate to the Sherlock endpoint).

To use your SUNet ID, choose "Stanford University" from the drop down menu at the Login page and follow the instructions from there.

Transfer#

Endpoint name

The Globus endpoint name for Sherlock is SRCC Sherlock.

Oak endpoint

The Sherlock endpoint only provides access to Sherlock-specific file systems ($HOME, $GROUP_HOME, $SCRATCH and $GROUP_SCRATCH). Oak features its own Globus endpoint: SRCC Oak.

You can use Globus to transfer data between your local workstation (e.g., your laptop or desktop) and Sherlock. In this workflow, you configure your local workstation as a Globus endpoint by installing the Globus Connect software.

  1. Log in to Globus.org
  2. Use the Manage Endpoints interface to "add Globus Connect Personal" as an endpoint (you'll need to install Globus Connect Personal on your local machine)
  3. Transfer Files, using your new workstation endpoint for one side of the transfer, and the Sherlock endpoint (SRCC Sherlock) on the other side.

You can also transfer data between two remote endpoints, by choosing another endpoint you have access to instead of your local machine.

CLI and API#

Globus also provides a command-line interface (CLI) and application programming interface (API) as alternatives to its web interface.

For more information about the API, please see the Globus API documentation for more details.

For more information about the CLI, please see the Globus CLI documentation and Globus CLI quick start. Note that the Globus CLI is available through the module system on Sherlock:

$ module load system py-globus-cli
+$ globus login
+# follow instructions to get set up
+

Once you've authorized the application, you can use the globus CLI to copy files in between endpoints and collections that you have access to. Endpoints and collections are identified by their unique UUID4 identifiers, which are viewable through the Globus web app. The CLI will step you through any additional authorizations required for you to access the endpoints or collections.

For example, to asynchronously copy files between Sherlock and Oak (assuming you have already been allocated Oak storage):

$ GLOBUS_SHERLOCK_UUID="6881ae2e-db26-11e5-9772-22000b9da45e"
+$ GLOBUS_OAK_UUID="8b3a8b64-d4ab-4551-b37e-ca0092f769a7"
+$ globus transfer --recursive \
+    "$GLOBUS_SHERLOCK_UUID:$SCRATCH/my-interesting-project" \
+    "$GLOBUS_OAK_UUID:$OAK/my-interesting-project-copy"
+
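
Transfers submitted this way run asynchronously, and you can check on their progress with the Globus CLI task commands; <task_id> below stands for the task ID printed by globus transfer:

$ globus task list
+$ globus task show <task_id>
+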

Data Transfer Nodes (DTNs)#

No shell

The DTNs don't provide any interactive shell, so connecting via SSH directly won't work. They will only accept scp, sftp, rsync or bbcp connections.

A pool of dedicated Data Transfer Nodes is available on Sherlock, to provide exclusive resources for large-scale data transfers.

The main benefit of using them is that transfer tasks can't be disrupted by other users' interactive tasks or filesystem- and I/O-intensive workloads on the login nodes.

By using the Sherlock DTNs, you'll make sure that your data flows will go through a computer whose sole purpose is to move data around.

They support all the SSH-based transfer tools described above: scp, sftp, rsync and bbcp, as well as SSHFS mounts.

To transfer files via the DTNs, simply use dtn.sherlock.stanford.edu as a remote server host name. For instance:

$ scp foo <sunetid>@dtn.sherlock.stanford.edu:~/foo
+

$HOME on DTNs

One important difference to keep in mind when transferring files through the Sherlock DTNs is that the default destination path for files, unless specified, is the user $SCRATCH directory, not $HOME.

That means that the following command:

$ scp foo <sunetid>@dtn.sherlock.stanford.edu:
+
will create the foo file as $SCRATCH/foo, and not as $HOME/foo.

You can transfer files to your $HOME directory via the DTNs by specifying the full path as the destination:

$ scp foo <sunetid>@dtn.sherlock.stanford.edu:$HOME/foo
+

Cloud storage#

If you need to back up some of your Sherlock files to cloud-based storage services, we also provide a set of utilities that can help.

Google Drive#

Google Drive storage for Stanford users

For more information about using Google Drive at Stanford, please see the University IT Google Drive page.

We provide the rclone tool on Sherlock to interact with Google Drive. You'll just need to load the rclone module to be able to use it to move your files from/to Google Drive:

$ module load system rclone
+$ rclone --help
+

This tutorial provides an example of transferring files between Google Drive and Oak storage.
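
As a quick sketch, once you've defined a Google Drive remote with rclone config (the gdrive: remote name below is just an example, yours may differ), copying a directory could look like this:

$ rclone config
+$ rclone copy $HOME/results gdrive:results
+$ rclone ls gdrive:results
+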

The Globus CLI (see above) can also be used to copy files from Sherlock to Stanford's Google Drive.

AWS#

You can also access AWS storage from the Sherlock command line with the AWS Command Line Interface:

$ module load system aws-cli
+$ aws help
+
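
For instance, a minimal sketch of copying a file to an S3 bucket, assuming your credentials are set up with aws configure and that s3://my-bucket is a hypothetical bucket you can write to:

$ aws s3 cp results.tar.gz s3://my-bucket/results.tar.gz
+$ aws s3 ls s3://my-bucket/
+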

Other services#

If you need to access other cloud storage services, you can use rclone: it can be used to sync files and directories to and from Google Drive, Amazon S3, Box, Dropbox, Google Cloud Storage, Amazon Drive, Microsoft OneDrive and many more.

$ ml load system rclone
+$ rclone -h
+

For more details about how to use rclone, please see the official documentation.


  1. For more details, see the SSH clients page

  2. Fetch is a commercial program, and is available as part of the Essential Stanford Software bundle. 

\ No newline at end of file diff --git a/docs/storage/filesystems/index.html b/docs/storage/filesystems/index.html new file mode 100644 index 000000000..07deed26e --- /dev/null +++ b/docs/storage/filesystems/index.html @@ -0,0 +1,6 @@ + Filesystems - Sherlock

Filesystems

The following sections describe the characteristics and best uses of each of Sherlock's filesystems.

$HOME#

Summary

$HOME is your home directory. It's the best place to keep your code and important data as it provides snapshots and off-site replication. It is not meant to host data that will be actively read and written to by compute jobs.

Characteristics
Type high speed, distributed NFS file system
Quota 15 GB for the whole $HOME directory
Snapshots yes (cf. Snapshots for more info)
Backups off-site replication
Purge policy not purged
Scope all login and compute nodes

$HOME is best suited for personal configuration files, scripts, small reference files or datasets, source code and individual software installations.

When you log in, the system automatically sets the current working directory to $HOME: it's the location you'll end up when connecting to Sherlock. You can store your source code and build your executables there.

We strongly recommend using $HOME to reference your home directory in scripts, rather than its explicit path.

Checking quota usage#

The sh_quota tool can be used to display quota usage on $HOME

$ sh_quota -f HOME
+

See the Checking Quotas section for more details.


$GROUP_HOME#

Summary

$GROUP_HOME is your group home directory. It's the best place to keep your group's shared code, software installations and important data as it provides snapshots and off-site replication. It is not meant to host data that will be actively read and written to by compute jobs.

$HOME and $GROUP_HOME are based on the same physical file system.

Characteristics
Type high speed, distributed NFS file system
Quota 1 TB for the whole $GROUP_HOME directory
Snapshots yes (cf. Snapshots for more info)
Backups off-site replication
Purge policy not purged
Scope all login and compute nodes

$GROUP_HOME is best suited for group-shared source code, common software installations, shared data sets and scripts.

We strongly recommend using $GROUP_HOME to reference your group home directory in scripts, rather than its explicit path.

Checking quota usage#

The sh_quota tool can be used to display quota usage on $GROUP_HOME

$ sh_quota -f GROUP_HOME
+

See the Checking Quotas section for more details.


$SCRATCH#

Summary

$SCRATCH is your personal scratch space. It's the best place to store temporary files, such as raw job output, intermediate files, unprocessed results, and so on.

Purge policy

Files are automatically purged from $SCRATCH after an inactivity period:

  • files that are not modified after 90 days are automatically deleted,
  • contents need to change for a file to be considered modified. The touch command does not modify file contents and thus does not extend a file's lifetime on the filesystem.

$SCRATCH is not meant to store permanent data, and should only be used for data associated with currently running jobs. It's not a target for backups, archived data, etc. See the Expiration Policy section for details.

Characteristics
Type Parallel, high-performance Lustre file system
Quota 100 TB / 20,000,000 inodes2
Snapshots NO
Backups NO
Purge policy data not modified in the last 90 days are automatically purged
Scope all login and compute nodes

$SCRATCH is best suited for large files, such as raw job output, intermediate job files, unprocessed simulation results, and so on. This is the recommended location to run jobs from, and to store files that will be read or written to during job execution.

Old files are automatically purged on $SCRATCH so users should avoid storing long-term data there.

Each compute node has a low-latency, high-bandwidth Infiniband link to $SCRATCH. The aggregate bandwidth of the filesystem is about 75 GB/s, so any job with high data performance requirements will benefit from using $SCRATCH for I/O.

We strongly recommend using $SCRATCH to reference your scratch directory in scripts, rather than its explicit path.

Checking quota usage#

The sh_quota tool can be used to display quota usage on $SCRATCH

$ sh_quota -f SCRATCH
+

See the Checking Quotas section for more details.

Expiration policy#

Inactive files are automatically purged

Files that are not modified in the last 90 days will be automatically deleted from the filesystem.

To manage available space and maintain optimal performance for all jobs, all files on $SCRATCH are subject to automatic purges: after a period of inactivity, files that are not used anymore will be automatically deleted from the filesystem.

File activity is defined based on the last time a file's contents (the actual data in the file) have been modified, meaning that files whose contents have not been modified in the previous 90 days will be automatically deleted.

Each time a file's contents are modified, the expiration countdown is reset, and the file gets another 90 days of lifetime.

Metadata changes don't qualify as an update

Modifying a file's contents is the only way to reset the expiration countdown and extend the file's lifetime on the filesystem.

Metadata modifications such as: reading the file, renaming it, moving it to a different directory, changing its permissions or its ownership, "touching" it to update its last modification or access times, won't have any effect on the purge countdown.

Purges are based on an internal filesystem property that reflects the last date a file's data has been modified, and which is unfortunately not readily accessible by users.

Please note that tools like ls will only display the date of the last metadata1 modification for a file, which is not necessarily relevant to determine a file's eligibility for deletion. For instance, using the touch command on a file to update its last modification date will only update the metadata, not the data, and as such, will not reset the purge countdown timer.

Filesystem purges are a continuous process: they don't run at particular times, but are carried out in a permanent background fashion. Files are not necessarily deleted right away when they become eligible for deletion. For instance, if you create a file on February 1st and don't ever modify it afterwards, it will automatically become eligible for deletion on May 1st, and can be deleted anytime after this date.

Empty directory trees that stay devoid of any file for more than 90 days will be automatically cleaned up as well.


$GROUP_SCRATCH#

$SCRATCH and $GROUP_SCRATCH are based on the same physical file system.

Summary

$GROUP_SCRATCH is your group shared scratch space. It's the best place to store temporary files, such as raw job output, intermediate files, or unprocessed results that need to be shared among users within a group.

$GROUP_SCRATCH is NOT a backup target

$GROUP_SCRATCH is not meant to store permanent data, and should only be used for data associated with currently running jobs. It's not a target for backups, archived data, etc.

Characteristics
Type parallel, high-performance Lustre file system
Quota 100 TB / 20,000,000 inodes2
Snapshots NO
Backups NO
Purge policy data not modified in the last 90 days are automatically purged
Scope all login and compute nodes

$GROUP_SCRATCH is best suited for large files, such as raw job output, intermediate job files, unprocessed simulation results, and so on. This is the recommended location to run jobs from, and to store files that will be read or written to during job execution.

Old files are automatically purged on $GROUP_SCRATCH so users should avoid storing long-term data there.

We strongly recommend using $GROUP_SCRATCH to reference your group scratch directory in scripts, rather than its explicit path.

Checking quota usage#

The sh_quota tool can be used to display quota usage on $GROUP_SCRATCH

$ sh_quota -f GROUP_SCRATCH
+

See the Checking Quotas section for more details.

Expiration policy#

As $SCRATCH and $GROUP_SCRATCH are on the same filesystem, the same expiration policy applies to both. Please see the $SCRATCH section above for more details.


$L_SCRATCH#

Summary

$L_SCRATCH is local to each compute node, and could be used to store temporary files for jobs with high IOPS requirements. Files stored in $L_SCRATCH are purged at the end of the job.

Characteristics
Type local filesystem, specific to each node, based on SSD
Quota n/a (usable space limited by the size of the physical storage devices, typically around 150 GB)
Snapshots NO
Backups NO
Purge policy data immediately purged at the end of the job
Scope locally on each node, not shared across nodes

$L_SCRATCH is best suited for small temporary files and applications which require low latency and high IOPS levels, typically intermediate job files, checkpoints, dumps of temporary states, etc.

Files stored in $L_SCRATCH are local to each node and can't be accessed from other nodes, nor from login nodes.

Please note that an additional, job-specific environment variable, $L_SCRATCH_JOB, will be set to a subdirectory of $L_SCRATCH for each job. So, if you have two jobs running on the same compute node, $L_SCRATCH will be the same and accessible from both jobs, while $L_SCRATCH_JOB will be different for each job.

For instance, if you have jobs 98423 and 98672 running on the same node, the variables will be set as follows:

Job id $L_SCRATCH $L_SCRATCH_JOB
98423 /lscratch/kilian /lscratch/kilian/98423
98672 /lscratch/kilian /lscratch/kilian/98672

We strongly recommend using $L_SCRATCH to reference your local scratch directory in scripts, rather than its full path.

Expiration policy#

All files stored in $L_SCRATCH_JOB are automatically purged at the end of the job, whether the job was successful or not. If you need to conserve files that were generated in $L_SCRATCH_JOB after the job ends, don't forget to add a command at the end of your batch script to copy them to one of the more persistent storage locations, such as $HOME or $SCRATCH.
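
For instance, a minimal sketch of a batch script that copies its results to $SCRATCH before exiting; my_app and its --output flag are placeholders for your actual application:

#!/bin/bash
+#SBATCH -p normal
+#SBATCH -c 1
+
+# write temporary output to the job-specific local scratch space
+my_app --output $L_SCRATCH_JOB/output
+
+# copy the results to persistent storage before the job ends
+mkdir -p $SCRATCH/results_$SLURM_JOB_ID
+cp -r $L_SCRATCH_JOB/output $SCRATCH/results_$SLURM_JOB_ID/
+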

Data stored in $L_SCRATCH will be purged at the end of a job only if no other job from the same user is still running on the node, which means that data stored in $L_SCRATCH (but not in $L_SCRATCH_JOB) will persist on the node until the user's last job terminates.


$OAK#

Summary

$OAK is Stanford Research Computing's research data storage offering. It provides an affordable, longer-term storage option for labs and researchers, and is ideally suited to host large datasets, or curated, post-processed results from job campaigns, as well as final results used for publication.

Order $OAK

Oak storage can be easily ordered online using the Oak Storage Service page.

$OAK is opt-in and available as an option on Sherlock, meaning that only members of groups which have purchased storage on Oak can access this filesystem.

For complete details and characteristics, including pricing, please refer to the Oak Storage Service page.

Characteristics
Type parallel, capacitive Lustre filesystem
Quota amount purchased (in 10 TB increments)
Snapshots NO
Backups optional cloud backup available
please contact us for details
Purge policy not purged
Scope all login and compute nodes
also available through gateways outside of Sherlock

$OAK is ideally suited for large shared datasets, archival data and curated, post-processed results from job campaigns, as well as final results used for publication.

Although jobs can directly read and write to $OAK during execution, it is recommended to first stage files from $OAK to $SCRATCH at the beginning of a series of jobs, and save the desired results back from $SCRATCH to $OAK at the end of the job campaign.
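
For instance, a minimal sketch of that staging pattern using rsync (all paths are hypothetical):

$ rsync -a $OAK/datasets/run42/ $SCRATCH/run42/
+# ... submit and run your jobs against $SCRATCH/run42 ...
+$ rsync -a $SCRATCH/run42/results/ $OAK/results/run42/
+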

We strongly recommend using $OAK to reference your group Oak directory in scripts, rather than its explicit path.

$OAK is not backed up

$OAK is not backed up or replicated, by design, and deleted files cannot be recovered. We recommend that all researchers keep an additional copy of their important files (for instance, in Google Drive).

Cloud backup option

For additional data security, Stanford Research Computing now offers "cloud backup" of Oak data as a managed service option. For an additional monthly fee, data on Oak can be backed up to the cloud (researchers are responsible for cloud storage costs). Please contact us if you'd like additional information.

Checking quota usage#

The sh_quota tool can be used to display quota usage on $OAK

$ sh_quota -f OAK
+

See the Checking Quotas section for more details.


  1. Metadata are data such as a file's size, name, path, owner, permissions, etc. 

  2. An inode (index node) is a data structure in a Unix-style file system that describes a file-system object such as a file or a directory. 

\ No newline at end of file diff --git a/docs/storage/index.html b/docs/storage/index.html new file mode 100644 index 000000000..62f193b1c --- /dev/null +++ b/docs/storage/index.html @@ -0,0 +1,51 @@ + Storage on Sherlock - Sherlock

Storage on Sherlock#

Sherlock provides access to several file systems, each with distinct storage characteristics. Each user and PI group gets access to a set of predefined directories on these file systems to store their data.

Sherlock is a compute cluster, not a storage system

Sherlock's storage resources are limited and are shared among many users. They are meant to store data and code associated with projects for which you are using Sherlock's computational resources. This space is for work actively being computed on with Sherlock, and should not be used as a target for backups from other systems.

If you're looking for a long-term storage solution for research data, Stanford Research Computing offers the Oak storage system, which is specifically intended for this usage.

Those file systems are shared with other users, and are subject to quota limits and, for some of them, purge policies (time-residency limits).

Filesystem overview#

Features and purpose#

Name Type Backups / Snapshots Performance Purpose Cost
$HOME, $GROUP_HOME NFS yes / yes low small, important files (source code, executable files, configuration files...) free
$SCRATCH, $GROUP_SCRATCH Lustre no / no high bandwidth large, temporary files (checkpoints, raw application output...) free
$L_SCRATCH local SSD no / no low latency, high IOPS job specific output requiring high IOPS free
$OAK Lustre optional / no moderate long term storage of research data volume-based1

Access scope#

Name Scope Access sharing level
$HOME cluster user
$GROUP_HOME cluster group
$SCRATCH cluster user
$GROUP_SCRATCH cluster group
$L_SCRATCH compute node user
$OAK cluster (optional, purchase required) group

Group storage locations are typically shared between all the members of the same PI group. User locations are only accessible by the user.

Quotas and limits#

Volume and inodes

Quotas are applied on both volume (the amount of data stored in bytes) and inodes: an inode (index node) is a data structure in a Unix-style file system that describes a file-system object such as a file or a directory. In practice, each filesystem entry (file, directory, link) counts as an inode.

Name Quota type Volume quota Inode quota Retention
$HOME directory 15 GB n/a ∞
$GROUP_HOME directory 1 TB n/a ∞
$SCRATCH directory 100 TB 20 million time limited
$GROUP_SCRATCH directory 100 TB 20 million time limited
$L_SCRATCH n/a n/a n/a job lifetime
$OAK directory amount purchased function of the volume purchased ∞

Quota types:

  • directory: based on file location; accounts for all the files that are in a given directory.
  • user: based on file ownership; accounts for all the files that belong to a given user.
  • group: based on file ownership; accounts for all the files that belong to a given group.

Retention types:

  • ∞: files are kept as long as the user account exists on Sherlock.
  • time limited: files are kept for a fixed length of time after they've been last modified. Once the limit is reached, files expire and are automatically deleted.
  • job lifetime: files are only kept for the duration of the job and are automatically purged when the job ends.

Global fail-safe user and group quotas on /scratch

To prevent potential issues which would result in the file system filling up completely and making it unusable for everyone, additional user and group-level quotas are in place on the /scratch file system, as a fail-safe:

  • a user will not be able to use more than 250 TB (50M inodes) in total, in all the /scratch directories they have access to.

  • a group will not be able to use more than 1 PB (200M inodes) in total across all the /scratch directories its group members have access to.

Checking quotas#

To check your quota usage on the different filesystems you have access to, you can use the sh_quota command:

$ sh_quota
++---------------------------------------------------------------------------+
+| Disk usage for user kilian (group: ruthm)                                 |
++---------------------------------------------------------------------------+
+|   Filesystem |  volume /   limit                  | inodes /  limit       |
++---------------------------------------------------------------------------+
+          HOME |   9.4GB /  15.0GB [||||||     62%] |      - /      - (  -%)
+    GROUP_HOME | 562.6GB /   1.0TB [|||||      56%] |      - /      - (  -%)
+       SCRATCH |  65.0GB / 100.0TB [            0%] | 143.8K /  20.0M (  0%)
+ GROUP_SCRATCH | 172.2GB / 100.0TB [            0%] |  53.4K /  20.0M (  0%)
+           OAK |  30.8TB / 240.0TB [|          12%] |   6.6M /  36.0M ( 18%)
++---------------------------------------------------------------------------+
+

Several options are provided to allow listing quotas for a specific filesystem only, or in the context of a different group (for users who are members of several PI groups). Please see the sh_quota usage information for details:

$ sh_quota -h
+sh_quota: display user and group quota information for all accessible filesystems.
+
+Usage: sh_quota [OPTIONS]
+    Optional arguments:
+        -f FILESYSTEM   only display quota information for FILESYSTEM.
+                        For instance: "-f $HOME"
+        -g GROUP        for users with multiple group memberships, display
+                        group quotas in the context of that group
+        -n              don't display headers
+        -j              JSON output (implies -n)
+
Examples#

For instance, to only display your quota usage on $HOME:

$ sh_quota -f HOME
+

If you belong to multiple groups, you can display the group quotas for your secondary groups with:

$ sh_quota -g <group_name>
+

And finally, for greater control over the output, quota usage can be displayed in JSON format via the -j option:

$ sh_quota -f SCRATCH -j
+{
+  "SCRATCH": {
+    "quotas": {
+      "type": "user",
+      "blocks": {
+        "usage": "47476660",
+        "limit": "21474836480"
+      },
+      "inodes": {
+        "usage": "97794",
+        "limit": "20000000"
+      }
+    }
+  }
+}
+

Locating large directories#

It's not always easy to identify files and directories that take the most space when getting close to the quota limits. Some tools can help with that.

  • du can be used to display the volume used by files and directories, in a given folder:

    $ cd mydir/
    +$ du --human-readable --summarize  *
    +101M    dir
    +2.0M    file
    +

    Note

    du will ignore hidden entries (everything that starts with a dot (.)). So when using it in your $HOME directory, it will skip things like .cache or .conda, which can contain significant volumes.
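
    One way to include those hidden entries in the tally, assuming a bash shell, is to list them explicitly and sort the results by size:

    $ du --human-readable --summarize .[!.]* * | sort --human-numeric-sort
    +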

  • ncdu is an interactive disk usage analyzer that generates a visual representation of the volume (and inode count) of directories. To run it, you need to load the ncdu module, and then run it on your directory of choice:

    $ ml system ncdu
    +$ ncdu $HOME
    +

    For very large directories, running ncdu in an interactive shell on a compute node is recommended, via sh_dev.

    You'll then be presented with an interactive file browser, showing information about the volume used by your directories, which should make it easy to pinpoint where most of the space is used.

Info

Note that any tool you use to view directory contents will only be able to show files that your user account has read access to. So on group-shared spaces, if you see a major difference between the totals from a tool like ncdu and the information reported by sh_quota, that can be an indicator that one of your group members has restricted permissions on a large number of items in your space.

Where should I store my files?#

Not all filesystems are equivalent

Choosing the appropriate storage location for your files is an essential step towards making your use of the cluster as efficient as possible. It will make your own experience much smoother, yield better performance for your jobs and simulations, and contribute to making Sherlock a useful and well-functioning resource for everyone.

Here is where we recommend storing different types of files and data on Sherlock:

  • personal scripts, configuration files and software installations → $HOME
  • group-shared scripts, software installations and medium-sized datasets → $GROUP_HOME
  • temporary output of jobs, large checkpoint files → $SCRATCH
  • curated output of job campaigns, large group-shared datasets, archives → $OAK

Accessing filesystems#

On Sherlock#

Filesystem environment variables

To facilitate access and data management, user and group storage locations on Sherlock are identified by a set of environment variables, such as $HOME or $SCRATCH.

We strongly recommend using those variables in your scripts rather than explicit paths, to facilitate the transition to new systems, for instance. By using those environment variables, you'll be sure that your scripts will continue to work even if the underlying filesystem paths change.

To see the contents of these variables, you can use the echo command. For instance, to see the absolute path of your $SCRATCH directory:

$ echo $SCRATCH
+/scratch/users/kilian
+

Or for instance, to move to your group-shared home directory:

$ cd $GROUP_HOME
+

From other systems#

External filesystems cannot be mounted on Sherlock

For a variety of security, manageability and technical reasons, we can't mount external filesystems or data storage systems on Sherlock. The recommended approach is to make Sherlock's data available on external systems.

You can mount any of your Sherlock directories on any external system you have access to by using SSHFS. For more details, please refer to the Data Transfer page.


  1. For more information about Oak, its characteristics and cost model, please see the Oak Service Description page

\ No newline at end of file diff --git a/docs/storage/overview/index.html b/docs/storage/overview/index.html new file mode 100644 index 000000000..e0c38c74d --- /dev/null +++ b/docs/storage/overview/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/docs/tags/index.html b/docs/tags/index.html new file mode 100644 index 000000000..ae5e83762 --- /dev/null +++ b/docs/tags/index.html @@ -0,0 +1 @@ + Tags - Sherlock
\ No newline at end of file diff --git a/docs/tech/facts/index.html b/docs/tech/facts/index.html new file mode 100644 index 000000000..2b2f87ff7 --- /dev/null +++ b/docs/tech/facts/index.html @@ -0,0 +1,13 @@ + Facts - Sherlock

Sherlock facts#

as of May 2024

Users#

  • 7,276 user accounts

  • 1,137 PI groups

    from all of Stanford's seven Schools, SLAC, Stanford Institutes, etc.

  • 202 owner groups

Interfaces#

  • 12 login nodes

  • 3 data transfer nodes (DTNs)

Computing#

  • 5.44 PFLOPs (FP64)

    19.76 PFLOPs (FP32)

  • 55,632 CPU cores

    4 CPU generations (13 CPU models)

  • 796 GPUs

    4 GPU generations (12 GPU models)

Hardware#

  • 1,732 compute nodes

    19 server models (from 3 different manufacturers)

  • 37 racks

    1,188 rack units

Energy#

  • 554.03 kW

    total power usage

  • 58 PDUs

Storage#

  • 9.7 PB $SCRATCH

    parallel, distributed filesystem, delivering over 200 GB/s of I/O bandwidth

  • 58.3 PB $OAK

    long term research data storage

Networking#

  • 104 Infiniband switches

    across 2 Infiniband fabrics (EDR, HDR)

  • 5,730 Infiniband cables

    spanning about 30.12 km

  • 53 Ethernet switches

Scheduler#

  • 179 Slurm partitions

  • 43,261 CPU.hours/day

    over 4 years of computing in a single day

  • $2,890,546 /month

    to run the same workload on t2.large on-demand cloud instances

\ No newline at end of file diff --git a/docs/tech/index.html b/docs/tech/index.html new file mode 100644 index 000000000..8a19f85fe --- /dev/null +++ b/docs/tech/index.html @@ -0,0 +1,3 @@ + Technical specifications - Sherlock

Technical specifications#

In a nutshell#

Sherlock features over 1,700 compute nodes, 55,600+ CPU cores and 700+ GPUs, for a total computing power of more than 5.4 Petaflops. That would rank it in the Top500 list of the most powerful supercomputers in the world.

The cluster currently extends across 2 Infiniband fabrics (EDR, HDR). A 9.7 PB parallel, distributed filesystem, delivering over 200 GB/s of I/O bandwidth, provides scratch storage for more than 7,200 users, and 1,100 PI groups.

Resources#

The Sherlock cluster was initiated in January 2014 with a base of freely available computing resources (about 2,000 CPU cores) and the accompanying networking and storage infrastructure (about 1 PB of shared storage).

Since then, it's been constantly expanding, spawning multiple cluster generations, with numerous contributions from many research groups on campus.

Cluster generations

For more information about Sherlock's ongoing evolution and expansion, please see Cluster generations.

Interface#

Type Qty Details
login nodes 12 sherlock.stanford.edu (load-balanced)
data transfer nodes 3 dedicated bandwidth for large data transfers

Computing#

Access to computing resources

Some of the computing resources listed below are freely available to every Sherlock user, while others are only accessible to Sherlock owners and their research teams, as indicated in the Access column of the table below.

Type Access Nodes CPU cores Details
compute nodes
normal partition
195 5,236 - 57x 20 (Intel E5-2640v4), 128 GB RAM, EDR IB
- 40x 24 (Intel 5118), 191 GB RAM, EDR IB
- 28x 32 (AMD 7543), 256 GB RAM, HDR IB
- 70x 32 (AMD 7502), 256 GB RAM, HDR IB
development nodes
dev partition
4 104 - 2x 20 (Intel E5-2640v4), 128 GB RAM, EDR IB
- 2x 32 (AMD 7543P), 256 GB RAM, HDR IB
- 32x Tesla A30_MIG-1g.6gb
large memory nodes
bigmem partition
9 504 - 4x 24 (Intel 5118), 384 GB RAM, EDR IB
- 1x 32 (Intel E5-2697Av4), 512 GB RAM, EDR IB
- 1x 56 (Intel E5-4650v4), 3072 GB RAM, EDR IB
- 1x 64 (AMD 7502), 4096 GB RAM, HDR IB
- 2x 128 (AMD 7742), 1024 GB RAM, HDR IB
GPU nodes
gpu partition
26 748 - 1x 20 (Intel E5-2640v4), 256 GB RAM, EDR IB
- 4x Tesla P100 PCIe
- 1x 20 (Intel E5-2640v4), 256 GB RAM, EDR IB
- 4x Tesla P40
- 3x 20 (Intel E5-2640v4), 256 GB RAM, EDR IB
- 4x Tesla V100_SXM2
- 1x 24 (Intel 5118), 191 GB RAM, EDR IB
- 4x Tesla V100_SXM2
- 2x 24 (Intel 5118), 191 GB RAM, EDR IB
- 4x Tesla V100 PCIe
- 16x 32 (AMD 7502P), 256 GB RAM, HDR IB
- 4x Geforce RTX_2080Ti
- 2x 32 (AMD 7502P), 256 GB RAM, HDR IB
- 4x Tesla V100S PCIe
privately-owned nodes
owners partition
1,494 48,680 40 different node configurations, including GPU and bigmem nodes
Total 1,732 55,632 796

Storage#

More information

For more information about storage options on Sherlock, please refer to the Storage section of the documentation.

Sherlock is architected around shared storage components, meaning that users can find the same files and directories from all of the Sherlock nodes.

  • Highly-available NFS filesystem for user and group home directories (with hourly snapshots and off-site replication)
  • High-performance Lustre scratch filesystem (9.7 PB parallel, distributed filesystem, delivering over 200 GB/s of I/O bandwidth)
  • Direct access to Stanford Research Computing's Oak long-term research data storage system (58.3 PB)
\ No newline at end of file diff --git a/docs/tech/status/index.html b/docs/tech/status/index.html new file mode 100644 index 000000000..2da6bd342 --- /dev/null +++ b/docs/tech/status/index.html @@ -0,0 +1,26 @@ + Status - Sherlock

Status

Scheduled maintenances

Maintenance operations and upgrades are scheduled on Sherlock on a regular basis. Per the University's Minimum Security policies, we deploy security patches on Sherlock as required for compliance.

Components and services#


For more details about Sherlock components and services, see the status dashboard.

Current usage#

\ No newline at end of file diff --git a/docs/user-guide/gpu/index.html b/docs/user-guide/gpu/index.html new file mode 100644 index 000000000..2afd3d85d --- /dev/null +++ b/docs/user-guide/gpu/index.html @@ -0,0 +1,78 @@ + GPU nodes - Sherlock

GPU nodes

To support the latest computing advancements in many fields of science, Sherlock features a number of compute nodes with GPUs that can be used to run a variety of GPU-accelerated applications. Those nodes are available to everyone, but are a scarce, highly-demanded resource, so getting access to them may require some wait time in queue.

Getting your own GPU nodes

If you need frequent access to GPU nodes, we recommend considering becoming an owner on Sherlock, so you can have immediate access to your GPU nodes when you need them.

GPU nodes#

A limited number of GPU nodes are available in the gpu partition. Anybody running on Sherlock can submit a job there. As owners contribute to expanding Sherlock, more GPU nodes are added to the owners partition, for use by the PI groups which purchased their own compute nodes.

There is a variety of different GPU configurations available in the gpu partition. To see the available GPU types, please see the GPU types section.

Submitting a GPU job#

To submit a GPU job, you'll need to use the --gpus (or -G) option in your batch script or command line submission options.

For instance, the following script will request one GPU for two hours in the gpu partition, and run the GPU-enabled version of gromacs:

#!/bin/bash
+#SBATCH -p gpu
+#SBATCH -c 10
+#SBATCH -G 1
+#SBATCH --time=2:00:00
+
+ml load gromacs/2016.3
+
+srun gmx_gpu ...
+

You can also directly run GPU processes on compute nodes with srun. For instance, the following command will display details about the GPUs allocated to your job:

$ srun -p gpu --gpus 2 nvidia-smi
+Fri Jul 28 12:41:49 2017
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 375.51                 Driver Version: 375.51                    |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|===============================+======================+======================|
+|   0  Tesla P40           On   | 0000:03:00.0     Off |                    0 |
+| N/A   26C    P8    10W / 250W |      0MiB / 22912MiB |      0%   E. Process |
++-------------------------------+----------------------+----------------------+
+|   1  Tesla P40           On   | 0000:04:00.0     Off |                    0 |
+| N/A   24C    P8    10W / 250W |      0MiB / 22912MiB |      0%   E. Process |
++-------------------------------+----------------------+----------------------+
+
++-----------------------------------------------------------------------------+
+| Processes:                                                       GPU Memory |
+|  GPU       PID  Type  Process name                               Usage      |
+|=============================================================================|
+|  No running processes found                                                 |
++-----------------------------------------------------------------------------+
+

GPU resources MUST be requested explicitly

Jobs will be rejected at submission time if they don't explicitly request GPU resources.

The gpu partition only accepts jobs explicitly requesting GPU resources. If they don't, they will be rejected with the following message:

$ salloc -p gpu
+srun: error: Unable to allocate resources: Job violates accounting/QOS policy (job submit limit, user's size and/or time limits)
+

Interactive sessions#

As for any other compute node, you can submit an interactive job and request a shell on a GPU node with the following command:

$ salloc -p gpu --gpus 1
+salloc: job 38068928 queued and waiting for resources
+salloc: job 38068928 has been allocated resources
+$ nvidia-smi --query-gpu=index,name --format=csv,noheader
+0, Tesla V100-SXM2-16GB
+

Instant lightweight GPU instances#

Given that some tasks don't necessarily require a full-fledged, top-of-the-line GPU, lightweight GPU instances are provided to allow instant access to GPU resources for quick debugging, prototyping or testing jobs.

Lightweight GPU instances

Lightweight GPU instances leverage NVIDIA’s Multi-Instance GPU (MIG) to provide multiple fully isolated GPU instances on the same physical GPU, each with their own high-bandwidth memory, cache, and compute cores.

Those GPU instances are instantly available via the dev partition, and can be requested with the sh_dev command:

$ sh_dev -g 1
+[...]
+[kilian@sh03-17n15 ~] (job 17628407) $ nvidia-smi -L
+GPU 0: NVIDIA A30 (UUID: GPU-ac772b5a-123a-dc76-9480-5998f435fe84)
+  MIG 1g.6gb      Device  0: (UUID: MIG-87e5d835-8046-594a-b237-ccc770b868ef)
+

For interactive apps in the Sherlock OnDemand interface, requesting a GPU in the dev partition will initiate an interactive session with access to a lightweight GPU instance.

gpu_dev

GPU types#

Since Sherlock features many different types of GPUs, each with its own technical characteristics, performance profiles and specificities, you may want to ensure that your job runs on a specific type of GPU.

To that end, Slurm allows users to specify constraints when submitting jobs, which indicate to the scheduler that only nodes with features matching the job constraints can be used to satisfy the request. Multiple constraints may be specified and combined with various operators (please refer to the official Slurm documentation for details).

The list of available features on compute nodes can be obtained with the node_feat1 command. More specifically, to list the GPU-related features of nodes in the gpu partition:

$ node_feat -p gpu | grep GPU_
+GPU_BRD:TESLA
+GPU_GEN:PSC
+GPU_MEM:16GB
+GPU_MEM:24GB
+GPU_SKU:TESLA_P100_PCIE
+GPU_SKU:TESLA_P40
+

You can use node_feat without any option to list all the features of all the nodes in all the partitions. But please note that node_feat will only list the features of nodes from partitions you have access to, so output may vary depending on your group membership.

The different characteristics2 of the various GPU types are listed in the following table:

Slurm feature Description Possible values Example job constraint
GPU_BRD GPU brand GEFORCE: GeForce / TITAN
TESLA: Tesla
#SBATCH -C GPU_BRD:TESLA
GPU_GEN GPU generation PSC: Pascal
MXW: Maxwell
#SBATCH -C GPU_GEN:PSC
GPU_MEM Amount of GPU memory 16GB, 24GB #SBATCH -C GPU_MEM:16GB
GPU_SKU GPU model TESLA_P100_PCIE
TESLA_P40
#SBATCH -C GPU_SKU:TESLA_P40

Depending on the partitions you have access to, more features may be available to be requested in your jobs.

For instance, to request a Tesla GPU for your job, you can use the following submission options:

$ srun -p gpu -G 1 -C GPU_BRD:TESLA nvidia-smi -L
+GPU 0: Tesla P100-SXM2-16GB (UUID: GPU-4f91f58f-f3ea-d414-d4ce-faf587c5c4d4)
+

Unsatisfiable constraints

If you specify a constraint that can't be satisfied in the partition you're submitting your job to, the job will be rejected by the scheduler. For instance, requesting an RTX 3090 GPU in the gpu partition, which doesn't feature any, will result in an error:

$ srun -p gpu -G 1 -C GPU_SKU:RTX_3090 nvidia-smi -L
+srun: error: Unable to allocate resources: Requested node configuration is not available
+

For more information about requesting specific node features and adding job constraints, you can also refer to the "Node features" page.

GPU compute modes#

By default, GPUs on Sherlock are set in the Exclusive Process compute mode3, to provide the best performance and an isolated environment for jobs, out of the box.

Some software may require GPUs to be set to a different compute mode, for instance to share a GPU across different processes within the same application.

To handle that case, we developed a specific option, --gpu_cmode, that users can add to their srun and sbatch submission options, to choose the compute mode for the GPUs allocated to their job.

Here's the list of the different compute modes supported on Sherlock's GPUs:

GPU compute mode --gpu_cmode option Description
"Default" shared Multiple contexts are allowed per device (NVIDIA default)
"Exclusive Process" exclusive Only one context is allowed per device, usable from multiple threads at a time (Sherlock default)
"Prohibited" prohibited No CUDA context can be created on the device

By default, or if the --gpu_cmode option is not specified, GPUs will be set in the "Exclusive Process" mode, as demonstrated by this example command:

$ srun -p gpu -G 1 nvidia-smi
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 387.26                 Driver Version: 387.26                    |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|===============================+======================+======================|
+|   0  Tesla P40           On   | 00000000:03:00.0 Off |                    0 |
+| N/A   22C    P8    10W / 250W |      0MiB / 22912MiB |      0%   E. Process |
++-------------------------------+----------------------+----------------------+
+

With the --gpu_cmode option, the scheduler will set the GPU compute mode to the desired value before execution:

$ srun -p gpu -G 1 --gpu_cmode=shared nvidia-smi
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 387.26                 Driver Version: 387.26                    |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|===============================+======================+======================|
+|   0  Tesla P40           On   | 00000000:03:00.0 Off |                    0 |
+| N/A   22C    P8    10W / 250W |      0MiB / 22912MiB |      0%      Default |
++-------------------------------+----------------------+----------------------+
+

Tip

"Default" is the name that the NVIDIA System Management Interface (nvidia-smi) uses to describe the mode where a GPU can be shared between different processes. It does not represent the default GPU compute mode on Sherlock, which is "Exclusive Process".

Advanced options#

A number of submission options are available when submitting GPU jobs, to request specific resource mapping or task binding options.

Here are some examples of options to allocate a set of resources as a function of the number of requested GPUs (a combined sketch follows the list):

  • --cpus-per-gpu: requests a number of CPUs per allocated GPU.

    For instance, the following options will allocate 2 GPUs and 4 CPUs:

    $ salloc -p gpu -G 2 --cpus-per-gpu=2
    +
  • --gpus-per-node: requests a number of GPUs per node,
  • --gpus-per-task: requests a number of GPUs per spawned task,
  • --mem-per-gpu: allocates (host) memory per allocated GPU.
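
For instance, a minimal sketch of a batch script combining some of these options; my_gpu_app is a placeholder for your actual GPU-enabled application, and the values are arbitrary:

#!/bin/bash
+#SBATCH -p gpu
+#SBATCH --nodes=1
+# request 2 GPUs on the node, and 8 GB of host memory per GPU
+#SBATCH --gpus-per-node=2
+#SBATCH --mem-per-gpu=8GB
+
+srun my_gpu_app ...
+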

Other options can help set particular GPU properties (topology, frequency...):

  • --gpu-bind: specify task/GPU binding mode.

    By default, every spawned task can access every GPU allocated to the job. This option can help make sure that tasks are bound to the closest GPU, for better performance.

  • --gpu-freq: specify GPU and memory frequency. For instance:

    $ srun -p test -G 1 --gpu-freq=highm1,verbose /bin/true
    +GpuFreq=memory_freq:2600,graphics_freq:758
    +

Those options are all available to the srun/sbatch/salloc commands, and more details about each of them can be found in the Slurm documentation.

Conflicting options

Given the multitude of options, it's very easy to submit a job with conflicting options. In most cases the job will be rejected.

For instance:

$ sbatch --gpus-per-task=1 --cpus-per-gpu=2  --cpus-per-task=1 ...
+
Here, the first two options implicitly set cpus-per-task to 2, while the third option explicitly sets cpus-per-task to 1. The job's requirements are thus conflicting and can't be satisfied.

Environment and diagnostic tools#

nvtop#

GPU usage information can be shown with the nvtop tool. nvtop is available as a module, which can be loaded like this:

$ ml load system nvtop
+

nvtop provides an htop-like interactive view of GPU utilization. Users can monitor, estimate and fine-tune their GPU resource requests with this tool. Percent GPU and memory utilization are shown while a user's GPU code is running.

nvtop


  1. See node_feat -h for more details. 

  2. The lists of values provided in the table are non exhaustive. 

  3. The list of available GPU compute modes and relevant details are available in the CUDA Toolkit Documentation 

\ No newline at end of file diff --git a/docs/user-guide/images/file_explorer_btn1.png b/docs/user-guide/images/file_explorer_btn1.png new file mode 100644 index 000000000..a33a461a7 Binary files /dev/null and b/docs/user-guide/images/file_explorer_btn1.png differ diff --git a/docs/user-guide/images/file_explorer_btn2.png b/docs/user-guide/images/file_explorer_btn2.png new file mode 100644 index 000000000..302a47345 Binary files /dev/null and b/docs/user-guide/images/file_explorer_btn2.png differ diff --git a/docs/user-guide/images/gpu_dev.png b/docs/user-guide/images/gpu_dev.png new file mode 100644 index 000000000..3304c7dc1 Binary files /dev/null and b/docs/user-guide/images/gpu_dev.png differ diff --git a/docs/user-guide/images/nvtop.png b/docs/user-guide/images/nvtop.png new file mode 100644 index 000000000..036f303dc Binary files /dev/null and b/docs/user-guide/images/nvtop.png differ diff --git a/docs/user-guide/images/ood_code-server.png b/docs/user-guide/images/ood_code-server.png new file mode 100644 index 000000000..5969d8e2c Binary files /dev/null and b/docs/user-guide/images/ood_code-server.png differ diff --git a/docs/user-guide/images/ood_dashboard.png b/docs/user-guide/images/ood_dashboard.png new file mode 100644 index 000000000..763f8ff57 Binary files /dev/null and b/docs/user-guide/images/ood_dashboard.png differ diff --git a/docs/user-guide/images/ood_jup.png b/docs/user-guide/images/ood_jup.png new file mode 100644 index 000000000..df9f0274e Binary files /dev/null and b/docs/user-guide/images/ood_jup.png differ diff --git a/docs/user-guide/images/ood_jup_notebook.png b/docs/user-guide/images/ood_jup_notebook.png new file mode 100644 index 000000000..d5010ed10 Binary files /dev/null and b/docs/user-guide/images/ood_jup_notebook.png differ diff --git a/docs/user-guide/images/ood_juplab.png b/docs/user-guide/images/ood_juplab.png new file mode 100644 index 000000000..f32581cc9 Binary files /dev/null and b/docs/user-guide/images/ood_juplab.png differ diff --git a/docs/user-guide/images/ood_logo.png b/docs/user-guide/images/ood_logo.png new file mode 100644 index 000000000..08c186736 Binary files /dev/null and b/docs/user-guide/images/ood_logo.png differ diff --git a/docs/user-guide/images/ood_matlab.png b/docs/user-guide/images/ood_matlab.png new file mode 100644 index 000000000..dee5c0849 Binary files /dev/null and b/docs/user-guide/images/ood_matlab.png differ diff --git a/docs/user-guide/images/ood_my_jobs.png b/docs/user-guide/images/ood_my_jobs.png new file mode 100644 index 000000000..968385e25 Binary files /dev/null and b/docs/user-guide/images/ood_my_jobs.png differ diff --git a/docs/user-guide/images/ood_new_job.png b/docs/user-guide/images/ood_new_job.png new file mode 100644 index 000000000..0b59aefdf Binary files /dev/null and b/docs/user-guide/images/ood_new_job.png differ diff --git a/docs/user-guide/images/ood_rstudio.png b/docs/user-guide/images/ood_rstudio.png new file mode 100644 index 000000000..50f183e5a Binary files /dev/null and b/docs/user-guide/images/ood_rstudio.png differ diff --git a/docs/user-guide/images/ood_sess.png b/docs/user-guide/images/ood_sess.png new file mode 100644 index 000000000..6bad1590f Binary files /dev/null and b/docs/user-guide/images/ood_sess.png differ diff --git a/docs/user-guide/images/ood_sess_support.png b/docs/user-guide/images/ood_sess_support.png new file mode 100644 index 000000000..5767d08a9 Binary files /dev/null and b/docs/user-guide/images/ood_sess_support.png differ diff --git 
a/docs/user-guide/images/ood_shell.png b/docs/user-guide/images/ood_shell.png new file mode 100644 index 000000000..09952328f Binary files /dev/null and b/docs/user-guide/images/ood_shell.png differ diff --git a/docs/user-guide/images/ood_submit_job.png b/docs/user-guide/images/ood_submit_job.png new file mode 100644 index 000000000..5d35e9179 Binary files /dev/null and b/docs/user-guide/images/ood_submit_job.png differ diff --git a/docs/user-guide/images/ood_tb.png b/docs/user-guide/images/ood_tb.png new file mode 100644 index 000000000..bf0597521 Binary files /dev/null and b/docs/user-guide/images/ood_tb.png differ diff --git a/docs/user-guide/ondemand/index.html b/docs/user-guide/ondemand/index.html new file mode 100644 index 000000000..7cd00daca --- /dev/null +++ b/docs/user-guide/ondemand/index.html @@ -0,0 +1,11 @@ + OnDemand - Sherlock

OnDemand

Introduction#

The Sherlock OnDemand interface allows you to conduct your research on Sherlock through a web browser. You can manage files (create, edit and move them), submit and monitor your jobs, see their output, check the status of the job queue, run a Jupyter notebook and much more, without logging in to Sherlock the traditional way, via an SSH terminal connection.

Quote

In neuroimaging there are a number of software pipelines that output HTML reports heavy on images files. Sherlock OnDemand allows users to check those as they appear on their $SCRATCH folder, for quick quality control, instead of having to mount remote filesystems, download data locally or move to any other storage location. Since the data itself is already quite big and costly to move, OnDemand is extremely helpful for fast assessment.

-- Carolina Ramirez, Williams PANLab

More documentation#

Open OnDemand was created by the Ohio Supercomputer Center (OSC).

The following documentation is specifically intended for using OnDemand on Sherlock. For more complete documentation about OnDemand in general, please see the extensive documentation for OnDemand created by OSC, including many video tutorials.

Connecting#

Connection information

To connect to Sherlock OnDemand, simply point your browser to https://ondemand.sherlock.stanford.edu

Sherlock OnDemand requires the same level of authentication as connecting to Sherlock over SSH. You will be prompted for your SUNet ID and password, and will go through the regular two-step authentication process.

The Sherlock OnDemand Dashboard will then open. From there, you can use the menus across the top of the page to manage files, get a shell on Sherlock, submit jobs or open interactive applications such as Jupyter Notebooks or RStudio sessions.

ood_dashboard

To end your Sherlock OnDemand session, click on the "Log Out" link at the top right of the Dashboard window and close your browser.

Getting a shell#

You can get shell access to Sherlock by choosing Clusters > Sherlock Shell Access from the top menu in the OnDemand Dashboard.

In the window that opens, you'll be logged in to one of Sherlock's login nodes, exactly as if you were connecting over SSH, except you don't need to install an SSH client on your local machine, configure Kerberos, or tweak your SSH client configuration to avoid endless two-factor prompts. How cool is that?

ood_shell

Managing files#

To create, edit or move files, click on the Files menu from the Dashboard page. A drop-down menu will appear, listing your most common storage locations on Sherlock: $HOME, $GROUP_HOME, $SCRATCH, $GROUP_SCRATCH, and all Oak storage you have access to, including your main $OAK1. Any rclone remotes you create on Sherlock to connect to cloud storage will appear here as well.
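
For instance, a new cloud storage remote can be defined once from a shell session on Sherlock with rclone's interactive configuration wizard (a sketch; the exact module name is an assumption):

$ ml system rclone    # module name assumed
$ rclone config       # interactive wizard to create a new remote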

Choosing one of the file spaces opens the File Explorer in a new browser tab. The files in the selected directory are listed.

There are two sets of buttons in the File Explorer.

  • Under the three vertical dots menu next to each filename (fs_btn1): these buttons allow you to View, Edit, Rename, Download, or Delete a file.

  • At the top of the window, on the right side: fs_btn2

    Button Function
    Open in Terminal Open a terminal window on Sherlock in a new browser tab
    Refresh Refresh the list of directory contents
    New File Create a new, empty file
    New Directory Create a new sub-directory
    Upload Copy a file from your local machine to Sherlock
    Download Download selected files to your local machine
    Copy/Move Copy or move selected files (after navigating to the destination directory)
    Delete Delete selected files
    Change directory Change your current working directory
    Copy path Copy the current working directory path to your clipboard
    Show Dotfiles Toggle the display of dotfiles (files starting with a ., which are usually hidden)
    Show Owner/Mode Toggle the display of owner and permission settings

Creating and editing jobs#

You can create new job scripts, edit existing scripts, and submit them to the scheduler through the Sherlock OnDemand interface.

From the top menus in the Dashboard, choose Jobs > Job Composer. A Job Composer window will open. There are two tabs at the top: Jobs and Templates.

In the Jobs tab, you'll find a list of the jobs you've submitted through OnDemand. The Templates tab will allow you to define your own job templates.

Creating a new job script#

To create a new job script, you'll need to follow the steps below.

Select a template#

Go to the Jobs tab in the Job Composer interface. You'll find a default template there: "Simple Sequential Job".

To create a new job script, click the blue New Job > From Default Template button in the upper left. You'll see a green message at the top of the page indicating: "Job was successfully created".

At the right of the Jobs page, you can see the Job Details, including the location of the script and the script name (by default, main_job.sh). Under that, you will see the contents of the job script in a section named Submit Script.

ood_new_job

Edit the job script#

You'll need to edit the job script so that it contains the commands and workflow you want to submit to the scheduler.

If you need more resources than the defaults, you must include options to change them in the job script. For more details, see the Running jobs section.
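
For instance, you could add directives like these at the top of your script (a sketch; the values are illustrative and should be adapted to your workload):

#SBATCH --time=02:00:00       # request a 2-hour time limit
#SBATCH --cpus-per-task=4     # request 4 CPU cores
#SBATCH --mem=16GB            # request 16 GB of memory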

You can edit the script in two ways:

  • Click the blue Edit Files button at the top of the Jobs tab in the Job Composer window.
  • In the Jobs tab of the Job Composer window, find the Submit Script section at the bottom right, and click the blue Open Editor button.

After you save the file, the editor window remains open; if you return to the Job Composer window, you will see that the content of your script has changed.

Edit the job options#

In the Jobs tab in the Job Composer window, click the blue Job Options button. The options for the selected job, such as its name, the job script to run, and the account it runs under, are displayed and can be edited. Click Save or Cancel to return to the job listing.

Submitting jobs#

To submit a job, select it in the Jobs tab of the Job Composer page, then click the green Submit button. A message at the top of the window shows whether the job submission was successful or not; if it was not, you can edit the job script or options and resubmit. When the job is submitted successfully, its status in the Job Composer window will change to Queued or Running. When the job completes, the status will change to Completed.

ood_submit_job

Monitoring jobs#

From the Dashboard page, the Jobs > Active Jobs top-level menu will bring you to a live view of Sherlock's scheduler queue. You'll be able to see all the jobs currently in queue, including running and pending jobs, as well as some details about individual jobs.

ood_my_jobs

At the bottom of the detailed view, you'll find two buttons that will bring you to the directory where that job's files are located, either in the File Manager or in a Shell session.

Interactive applications#

One of the main features of Sherlock OnDemand is the ability to run interactive applications directly from the web interface, without leaving your web browser.

Jupyter Notebooks#

You can run Jupyter Notebooks (using Python, Julia or other languages) through Sherlock OnDemand.

Some preliminary setup may be required

Before running your first Jupyter Notebook with IJulia, you'll need to run the following steps in an interactive session (e.g. started with sh_dev); this only needs to be done once:

$ ml julia
+$ julia
+julia> using Pkg;
+julia> Pkg.add("IJulia")
+

When you see the message that IJulia has been installed, you can end your interactive session.

To start a Jupyter session from Sherlock OnDemand:

  1. Select Interactive Apps > Jupyter Notebook from the top menu in the Dashboard page.

  2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your notebook starts.

ood_jup

  3. Click the blue Launch button to start your Jupyter session. You may have to wait in the queue for resources to become available.

  4. When your session starts, you can click on the blue Connect to Jupyter button to open your Jupyter Notebook. The Dashboard window will display information about your Jupyter session, including the name of the compute node it is running on, when it started, and how much time remains. ood_sess

  5. In your new Jupyter Notebook tab, you'll see 3 tabs: Files, Running and Clusters. ood_jup_notebook

By default, you are in the Files tab, which displays the contents of your $HOME directory on Sherlock. You can navigate through your files there.

Under the Running tab, you will see the list of all the notebooks or terminal sessions that you have currently running.

  6. You can now start a Jupyter Notebook:

    1. To open an existing Jupyter Notebook, which is already stored on Sherlock, navigate to its location in the Files tab and click on its name. A new window running the notebook will open.
    2. To create a new Jupyter Notebook, click on the New button at the top right of the file listing, and choose the kernel of your choice from the drop down.

To terminate your Jupyter Notebook session, go back to the Dashboard and click on My Interactive Sessions in the top menu. This will bring you to a page listing all your currently active interactive sessions. Identify the one you'd like to terminate and click on the red Cancel button.

JupyterLab#

To run JupyterLab via Sherlock OnDemand:

  1. Select Interactive Apps > JupyterLab from the top menu in the Dashboard page.

  2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

  3. Click the blue Launch button to start your JupyterLab session. You may have to wait in the queue for resources to become available.

  4. When your session starts, click the blue Connect to JupyterLab button. A new window opens with the JupyterLab interface.

  5. The first time you connect to JupyterLab via Sherlock OnDemand, you'll see 2 tabs: Files and Launcher.

ood_juplab

The Files tab displays the contents of your $HOME directory on Sherlock. You can navigate through your files there.

In the Launcher tab, you will have the option to create a new Jupyter Notebook or a new Console session by clicking the tile showing the kernel of your choice. You can also open a Terminal, or a text editor for a variety of file types, by clicking the corresponding tile.

To create a new kernel for IJulia:

  1. In the Launcher, click the Terminal tile in the "Other" section.

  2. In the Terminal, run the following commands:

    $ ml julia
    +$ julia
    +julia> using Pkg;
    +julia> Pkg.add("IJulia")
    +
  3. Open a new Launcher tab by clicking the + sign next to your open Terminal tab. Julia will now be listed in the "Notebook" and "Console" sections as an available kernel.

To create a custom kernel for a virtual environment using Python 3.x:

  1. In a shell session, activate your environment and run the following:

    $ pip3 install ipykernel
    +$ python3 -m ipykernel install --user --name env --display-name "My Env"
    +

    This will create a kernel for the environment env. It will appear as My Env in the JupyterLab Launcher.

    Creating a custom kernel for a Python 2.x environment

    When working with a Python 2.x environment, use the python/pip commands instead.

  2. The custom kernel will now be listed as an option in the "Notebook" and "Console" sections of the JupyterLab Launcher. To start a Jupyter Notebook using your virtual environment, click the tile for that kernel.

    Creating a custom kernel for a conda environment

    In order to use a kernel created from a conda environment, you must unload the python and py-jupyterlab modules from your JupyterLab session. This can be done using the JupyterLab Lmod extension. To use the Lmod extension, select the bottom tab in the left side menu of your JupyterLab window. You may also need to restart the kernel for your notebook or console.
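
As a complement to the note above, here's a minimal sketch of creating such a kernel from within an activated conda environment (the environment and kernel names are illustrative):

$ conda activate myenv
$ pip install ipykernel
$ python -m ipykernel install --user --name myenv --display-name "My Conda Env"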

MATLAB#

To run MATLAB via Sherlock OnDemand:

  1. Select Interactive Apps > MATLAB from the top menu in the Dashboard page.

  2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

  3. Click the blue Launch button to start your MATLAB session. You may have to wait in the queue for resources to become available.

  4. When your session starts, click the blue Connect to MATLAB button. A new window opens with the MATLAB interface.

ood_matlab

RStudio#

To run RStudio via Sherlock OnDemand:

  1. Select Interactive Apps > RStudio Server from the top menu in the Dashboard page.

  2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

  3. Click the blue Launch button to start your RStudio session. You may have to wait in the queue for resources to become available.

  4. When your session starts, click the blue Connect to RStudio Server button. A new window opens with the RStudio interface.

ood_rstudio

Installing packages in RStudio

You may encounter errors while installing R packages within RStudio. First try installing R packages in a shell session on the Sherlock command line. See our R packages documentation for more information.

TensorBoard#

To run TensorBoard via Sherlock OnDemand:

  1. Select Interactive Apps > TensorBoard from the top menu in the Dashboard page.

  2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

  3. Click the blue Launch button to start your TensorBoard session. You may have to wait in the queue for resources to become available.

  4. When your session starts, click the blue Connect to TensorBoard button. A new window opens with the TensorBoard interface.

ood_tb

VS Code#

You can use VS Code on Sherlock through the code-server interactive app.

Using your local VS Code with remote SSH

Connecting to Sherlock from VS Code on your local machine is not supported at this time due to a known issue with the closed-source "Remote SSH" extension.

To start a VS Code session via Sherlock OnDemand:

  1. Select Interactive Apps > code-server from the top menu in the Dashboard page.

  2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

  3. Click the blue Launch button to start your code-server session. You may have to wait in the queue for resources to become available.

  4. When your session starts, click the blue Connect to code-server button. A new window opens with the code-server interface.

ood_code-server

Support#

If you are experiencing issues with Sherlock or your interactive session, you can contact us directly from Sherlock OnDemand.

To submit a ticket about Sherlock or Sherlock OnDemand in general:

  1. Select Help > Submit Support Ticket from the top menu in the Dashboard page.

  2. In the screen that opens, complete the Support Ticket form. When applicable, please provide:

    • the full path to any files involved in your question or problem,

    • the command(s) you ran, and/or the job submission script(s) you used,

    • the exact, entire error message (or trace) you received.

  3. Click the blue Submit support ticket button. Research Computing support will respond to you as soon as we are able.

To submit a ticket about your current or recent interactive session:

  1. Select My Interactive Sessions from the top menu in the Dashboard page.

  2. In the screen that opens, find the card for the session you need help with. Active sessions will have a green header, and past sessions will have a gray header. Click that card's Submit support ticket link to open the Support Ticket form. ood_sess_support

  3. Complete the Support Ticket form. When applicable, please provide:

    • the full path to any files involved in your question or problem,

    • the command(s) you ran, and/or the job submission script(s) you used,

    • the exact, entire error message (or trace) you received.

  4. Click the blue Submit support ticket button. Research Computing support will respond to you as soon as we are able.


  1. if you have access to the Oak storage system

\ No newline at end of file diff --git a/docs/user-guide/running-jobs/index.html b/docs/user-guide/running-jobs/index.html new file mode 100644 index 000000000..fd672d41d --- /dev/null +++ b/docs/user-guide/running-jobs/index.html @@ -0,0 +1,155 @@ + Running jobs - Sherlock

Running jobs

Login nodes#

Login nodes are not for computing

Login nodes are shared among many users and therefore must not be used to run computationally intensive tasks. Those should be submitted to the scheduler which will dispatch them on compute nodes.

The key principle of a shared computing environment is that resources are shared among users and must be scheduled: it is mandatory to schedule work by submitting jobs to the scheduler on Sherlock. And since login nodes are a shared resource, they must not be used to execute computing tasks.

Acceptable uses of login nodes include:

  • lightweight file transfers,
  • script and configuration file editing,
  • job submission and monitoring.

Resource limits are enforced

To minimize disruption and ensure a comfortable working environment for users, resource limits are enforced on login nodes, and processes started there will automatically be terminated if their resource usage (including CPU time, memory and run time) exceeds those limits.

Slurm commands#

Slurm allows requesting resources and submitting jobs in a variety of ways. The main Slurm commands to submit jobs are listed in the table below:

Command Description Behavior
salloc Requests resources and allocates them to a job Starts a new shell, but does not execute anything
srun Requests resources and runs a command on the allocated compute node(s) Blocking command: will not return until the job ends
sbatch Requests resources and runs a script on the allocated compute node(s) Asynchronous command: will return as soon as the job is submitted
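
For instance (a sketch; the resource values and the job.sh script name are illustrative):

$ salloc --cpus-per-task=2 --time=01:00:00            # new shell with 2 cores allocated for 1 hour
$ srun --cpus-per-task=2 --time=01:00:00 hostname     # runs hostname on a compute node, blocks until done
$ sbatch job.sh                                       # submits job.sh and returns immediately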

Interactive jobs#

Dedicated nodes#

Interactive jobs allow users to log in to a compute node to run commands interactively on the command line. They can be an integral part of an interactive programming and debugging workflow. The simplest way to establish an interactive session on Sherlock is to use the sh_dev command:

$ sh_dev
+

This will open a login shell using one core and 4 GB of memory on one node, for one hour. sh_dev sessions run on dedicated compute nodes, which ensures minimal wait times when you need to access a node for testing scripts, debugging code or any other kind of interactive work.

sh_dev also provides X11 forwarding via the submission host (typically the login node you're connected to) and can thus be used to run GUI applications.

Compute nodes#

If you need more resources1, you can pass options to sh_dev to request more CPU cores, more nodes, or even run in a different partition. sh_dev -h will provide more information:

$ sh_dev -h
+sh_dev: start an interactive shell on a compute node.
+
+Usage: sh_dev [OPTIONS]
+    Optional arguments:
+        -c      number of CPU cores to request (OpenMP/pthreads, default: 1)
+        -n      number of tasks to request (MPI ranks, default: 1)
+        -N      number of nodes to request (default: 1)
+        -m      memory amount to request (default: 4GB)
+        -p      partition to run the job in (default: dev)
+        -t      time limit (default: 01:00:00)
+        -r      allocate resources from the named reservation (default: none)
+        -J      job name (default: sh_dev)
+        -q      quality of service to request for the job (default: normal)
+
+    Note: the default partition only allows for limited amount of resources.
+    If you need more, your job will be rejected unless you specify an
+    alternative partition with -p.
+

Another way to get an interactive session on a compute node is to use srun to execute a shell through the scheduler. For instance, to start a bash session on a compute node, with the default resource requirements (one core for 2 hours), you can run:

$ srun --pty bash
+

The main advantage of this approach is that it will allow you to specify the whole range of submission options that sh_dev may not support.
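
For instance, to get an interactive shell on a large-memory node (a sketch; the partition name and resource values are illustrative):

$ srun --partition=bigmem --mem=64GB --time=04:00:00 --pty bash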

Finally, if you prefer to submit an existing job script or other executable as an interactive job, you can use the salloc command:

$ salloc script.sh
+

If you don't provide a command to execute, salloc will start a Slurm job and allocate resources for it, but it will not automatically connect you to the allocated node(s). It will only start a new shell on the same node you launched salloc from, and set up the appropriate $SLURM_* environment variables. So you will typically need to look at them to see what nodes have been assigned to your job. For instance:

$ salloc
+salloc: Granted job allocation 655914
+$ echo $SLURM_NODELIST
+sh02-01n55
+$ ssh sh02-01n55
+[...]
+sh02-01n55 ~ $
+
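
As an alternative to SSH, since salloc exports the allocation in your environment, you can also use srun from within the salloc session to get a shell on the allocated node (a sketch; the allocation id and node name are illustrative):

$ salloc
salloc: Granted job allocation 655915
$ srun --pty bash
sh02-01n55 ~ $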

Connecting to nodes#

Login to compute nodes

Users are not allowed to login to compute nodes unless they have a job running there.

If you SSH to a compute node without any active job allocation, you'll be greeted by the following message:

$ ssh sh02-01n01
+Access denied by pam_slurm_adopt: you have no active jobs on this node
+Connection closed
+$
+

Once you have a job running on a node, you can SSH directly to it and run additional processes2, or observe how your application behaves, debug issues, and so on.

The salloc command supports the same parameters as sbatch, and can override any default configuration. Note that any #SBATCH directive in your job script will not be interpreted by salloc when it's executed this way; you must specify all arguments directly on the command line for them to be taken into account.
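
For example, to allocate more resources for an interactive job (a sketch; the values are illustrative, and script.sh is the same kind of user-provided script as above):

$ salloc --cpus-per-task=4 --mem=8GB --time=02:00:00 script.sh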

Batch jobs#

It's easy to schedule batch jobs on Sherlock. A job is simply an instance of your program, for example your R, Python or MATLAB script, that is submitted to and executed by the scheduler (Slurm). When you submit a job with the sbatch command, it's called a batch job, and it will either run immediately or pend (wait) in the queue.

The length of time a job will pend is determined by several factors: how many other jobs are in the queue ahead of your job, and how many resources your job is requesting, are the most important ones. One key principle when requesting resources is to only request as many as your job actually needs: this will ensure your job pends in the queue for as little time as necessary. To get a rough idea of what resources are needed, you can profile your code/jobs in an sh_dev session in real time with htop, nvtop, sacct, etc. The basic concept is to tell the scheduler what resources your job needs and how long it should run. These resources are:

CPUs: How many CPUs the program you are calling in the sbatch script needs. Unless it can utilize multiple CPUs at once, you should request a single CPU. Check your code's documentation, or try running it in an interactive sh_dev session and watch htop, if you are unsure.

GPUs: If your code is GPU-enabled, how many GPUs does it need? Use the diagnostic tool nvtop to see whether your code can run on multiple GPUs, and how much GPU memory it uses, in real time.

memory (RAM): How much memory your job will consume. Some things to consider: will it load a large file or matrix into memory? Does it consume a lot of memory on your laptop? The default memory allocation is often sufficient for many jobs.

time: How long will it take for your code to run to completion?

partition: Which set of compute nodes on Sherlock will you run on: normal, gpu, owners, bigmem? Use the sh_part command to see which partitions you are allowed to run on. The default partition on Sherlock is the normal partition.

Next, you tell the scheduler what your job should do: load modules and run your code. Note that any logic you can code into a bash script can also be coded into an sbatch script.

This example job will run the Python script mycode.py for 10 minutes on the normal partition, using 1 CPU and 8 GB of memory. To aid in debugging, we name the job "test_job" and append the job ID (%j) to the two output files that Slurm creates when the job runs. The output files are written to the directory from which you launched your job (you can also specify a different path): one file, ending in .err, will contain any error output, and the other, ending in .out, will contain non-error output. Look in these two files for useful debugging information.

Because it's a Python 3 script that uses some NumPy code, we need to load the python/3.6.1 and py-numpy/1.19.2_py36 modules. The Python script is then called at the end of the sbatch script, just as you would call it on the command line:

sbatch script:

#!/usr/bin/bash
+#SBATCH --job-name=test_job
+#SBATCH --output=test_job.%j.out
+#SBATCH --error=test_job.%j.err
+#SBATCH --time=10:00
+#SBATCH -p normal
+#SBATCH -c 1
+#SBATCH --mem=8GB
+module load python/3.6.1
+module load py-numpy/1.19.2_py36
+python3 mycode.py
+
Create and edit the sbatch script with a text editor like vim or nano, or with the OnDemand file manager, then save the file. In this example, we call it test.sbatch.

Submit to the scheduler with the sbatch command:

$ sbatch test.sbatch
+
Monitor your job and job ID in the queue with the squeue command:

$ squeue -u $USER
+   JOBID     PARTITION     NAME     USER    ST       TIME  NODES  NODELIST(REASON)
+   44915821    normal    test_job  <userID>  PD       0:00      1 (Priority)
+

Notice that the job's state (ST) is pending (PD).

Once the job starts to run, that will change to R:

$ squeue -u $USER
+    JOBID     PARTITION     NAME     USER     ST      TIME  NODES   NODELIST(REASON)
+    44915854    normal test_job  <userID>     R      0:10     1     sh02-01n49
+

Here you can see it has been running (R) on the compute node sh02-01n49 for 10 seconds. While your job is running, you have SSH access to that node and can run diagnostic tools such as htop and nvtop to monitor your job's memory and CPU/GPU utilization in real time. You can also manage the job using the job ID assigned to it (44915854): for example, the job can be cancelled with the scancel command.
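
For instance, to cancel the job shown above:

$ scancel 44915854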

Resource requests#

To get a better idea of the amount of resources your job will need, you can use the ruse command, available as a module:

$ module load system ruse
+

ruse is a command line tool developed by Jan Moren to measure a process' resource usage. It periodically measures the resource use of a process and its subprocesses, and can help you find out how many resources to allocate to your job. It will determine the actual memory, execution time and number of cores that individual programs or MPI applications need to request in their job submission options.

ruse periodically samples the process and its subprocesses and keeps track of CPU usage, elapsed time and maximum memory use. It can also optionally record the sampled values over time. The purpose of ruse is not to profile processes in detail, but to follow jobs that run for many minutes, hours or days, with no performance impact and without changing the measured application in any way.

You'll find complete documentation and details about ruse's usage on the project webpage, but here are a few useful examples.

Sizing a job#

In its simplest form, ruse can help discover how many resources a new script or application will need. For instance, you can start a sizing session on a compute node with an overestimated amount of resources, and start your application like this:

$ ruse ./myapp
+

This will generate a <myapp>-<pid>/ruse output file in the current directory, looking like this:

Time:           02:55:47
+Memory:         7.4 GB
+Cores:          4
+Total_procs:    3
+Active_procs:   2
+Proc(%): 99.9  99.9
+

It shows that myapp:

  • ran for almost 3 hours,
  • used a little less than 8 GB of memory,
  • had 4 cores available,
  • spawned 3 processes, among which at most 2 were active at the same time,
  • and that both active processes each used 99.9% of a CPU core.

This information could be useful in tailoring the job resource requirements to its exact needs, making sure that the job won't be killed for exceeding one of its resource limits, and that the job won't have to wait too long in queue for resources that it won't use. The corresponding job request could look like this:

#SBATCH --time 3:00:00
+#SBATCH --mem 8GB
+#SBATCH --cpus-per-task 2
+
Verifying a job's usage#

It's also important to verify that applications, especially parallel ones, stay within the confines of the resources they've requested. For instance, a number of parallel computing libraries will assume that they can use all the resources on the host, automatically determine the number of physical CPU cores present on the compute node, and start as many processes. This could be a significant issue if the job requested fewer CPUs, as more processes will be constrained on fewer CPU cores, which will result in node overload and degraded performance for the application.

To avoid this, you can start your application with ruse and report usage for each time step specified with -t. You can also request the reports to be displayed directly on stdout rather than stored in a file.

For instance, this will report usage every 10 seconds:

$ ruse -s -t10 --stdout ./myapp
+   time         mem   processes  process usage
+  (secs)        (MB)  tot  actv  (sorted, %CPU)
+     10        57.5    17    16   33  33  33  25  25  25  25  25  25  25  25  20  20  20  20  20
+     20        57.5    17    16   33  33  33  25  25  25  25  25  25  25  25  20  20  20  20  20
+     30        57.5    17    16   33  33  33  25  25  25  25  25  25  25  25  20  20  20  20  20
+
+Time:           00:00:30
+Memory:         57.5 MB
+Cores:          4
+Total_procs:   17
+Active_procs:  16
+Proc(%): 33.3  33.3  33.2  25.0  25.0  25.0  25.0  25.0  25.0  24.9  24.9  20.0  20.0  20.0  20.0  19.9
+

Here, we can see that despite being allocated 4 CPUs, the application started 17 threads, 16 of which were actively running intensive computations, with the unfortunate consequence that each process could only use a fraction of a CPU.

In that case, to ensure optimal performance and system operation, it's important to modify the application parameters to make sure that it doesn't start more computing processes than the number of requested CPU cores.
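
For OpenMP-based applications, for instance, a common way to do this is to derive the thread count from the Slurm allocation in your job script (a sketch, assuming your application honors the OMP_NUM_THREADS environment variable):

#SBATCH --cpus-per-task=4
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK    # start no more threads than allocated CPU cores
./myapp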

Available resources#

Whether you are submitting a batch job or an interactive job, it's important to know what resources are available to you. For this reason, we provide sh_part, a command-line tool to help answer questions such as:

  • which partitions do I have access to?
  • how many jobs are running on them?
  • how many CPUs can I use?
  • where should I submit my jobs?

sh_part can be executed on any login or compute node to see what partitions are available to you, and its output looks like this:

$ sh_part
+     QUEUE STA   FREE  TOTAL   FREE  TOTAL RESORC  OTHER MAXJOBTIME    CORES       NODE   GRES
+ PARTITION TUS  CORES  CORES  NODES  NODES PENDNG PENDNG  DAY-HR:MN    /NODE     MEM-GB (COUNT)
+    normal   *    153   1792      0     84    23k    127    7-00:00    20-24    128-191 -
+    bigmem         29     88      0      2      0      8    1-00:00    32-56   512-3072 -
+       dev         31     40      0      2      0      0    0-02:00       20        128 -
+       gpu         47    172      0      8    116      1    7-00:00    20-24    191-256 gpu:4(S:0-1)(2),gpu:4(S:0)(6)
+

The above example shows four possible partitions where jobs can be submitted: normal, bigmem, dev, or gpu. It also provides additional information such as the maximum amount of time allowed in each partition (MAXJOBTIME), the number of other jobs already in queue, along with the ranges of memory available on nodes in each partition.

  • in the QUEUE PARTITION column, the * character indicates the default partition,
  • the RESOURCE PENDING column shows the core count of pending jobs that are waiting on resources,
  • the OTHER PENDING column lists core counts for jobs that are pending for other reasons, such as licenses, user, group or any other limit,
  • the GRES column shows the number and type of Generic RESources available in that partition (typically, GPUs), which CPU socket they're available from, and the number of nodes that feature that specific GRES combination. For instance, in the output above, gpu:4(S:0-1)(2) means that the gpu partition features 2 nodes with 4 GPUs each, and that those GPUs are accessible from both CPU sockets (S:0-1).

Recurring jobs#

Warning

Cron tasks are not supported on Sherlock.

Users are not allowed to create cron jobs on Sherlock, for a variety of reasons:

  • resource limits cannot be easily enforced in cron jobs, meaning that a single user can end up monopolizing all the resources of a login node,
  • no amount of resources can be guaranteed when executing a cron job, leading to unreliable runtime and performance,
  • user cron jobs have the potential of bringing down whole nodes by creating fork bombs, if they're not carefully crafted and tested,
  • compute and login nodes could be redeployed at any time, meaning that cron jobs scheduled there could go away without the user being notified, and cause all sorts of unexpected results,
  • cron jobs could be mistakenly scheduled on several nodes and run multiple times, which could result in corrupted files.

As an alternative, if you need to run recurring tasks at regular intervals, we recommend the following approach: by using the --begin job submission option, and creating a job that resubmits itself once it's done, you can virtually emulate the behavior and benefits of a cron job, without its disadvantages: your task will be scheduled on a compute node, and use all of the resources it requested, without being impacted by anything else.

Depending on your recurring job's specifics, where you submit it, and the state of the cluster at the time of execution, the starting time of that task is not guaranteed and may be delayed, as it will be scheduled by Slurm like any other job. Typical recurring jobs, such as file synchronization, database updates or backup tasks, don't require strict starting times, though, so most users find this an acceptable trade-off.

The table below summarizes the advantages and drawbacks of each approach:

                                       Cron tasks   Recurring jobs
Authorized on Sherlock                 no           yes
Dedicated resources for the task       no           yes
Persistent across node redeployments   no           yes
Unique, controlled execution           no           yes
Precise schedule                       yes          no

Recurrent job example#

The script below presents an example of such a recurring job that emulates a cron task. It appends a timestamped line to a cron.log file in your $HOME directory and runs every 7 days.

cron.sbatch
#!/bin/bash
+#SBATCH --job-name=cron
+#SBATCH --begin=now+7days
+#SBATCH --dependency=singleton
+#SBATCH --time=00:02:00
+#SBATCH --mail-type=FAIL
+
+
+## Insert the command to run below. Here, we're just storing the date in a
+## cron.log file
+date -R >> $HOME/cron.log
+
+## Resubmit the job for the next execution
+sbatch $0
+

If the job payload (here, the date command) fails for some reason and generates an error, the job will not be resubmitted, and the user will be notified by email.

We encourage users to get familiar with the submission options used in this script by taking a look at the sbatch man page, but some details are given below:

Submission option or command Explanation
--job-name=cron makes it easy to identify the job, is used by the --dependency=singleton option to identify identical jobs, and will allow cancelling the job by name (because its jobid will change each time it's submitted)
--begin=now+7days will instruct the scheduler to not even consider the job for scheduling before 7 days after it's been submitted
--dependency=singleton will make sure that only one cron job runs at any given time
--time=00:02:00 runtime limit for the job (here 2 minutes). You'll need to adjust the value depending on the task you need to run (shorter runtime requests usually result in the job running closer to the clock mark)
--mail-type=FAIL will send an email notification to the user if the job ever fails
sbatch $0 will resubmit the job script by calling its own name ($0) after successful execution

You can save the script as cron.sbatch or any other name, and submit it with:

$ sbatch cron.sbatch
+

It will start running for the first time 7 days after you submit it, and it will continue to run until you cancel it with the following command (using the job name, as defined by the --job-name option):

$ scancel -n cron
+

Persistent jobs#

Recurring jobs described above are a good way to emulate cron jobs on Sherlock, but don't fit all needs, especially when a persistent service is required.

For instance, workflows that require a persistent database connection would benefit from an ever-running database server instance. We don't provide persistent database services on Sherlock, but instructions and examples on how to submit database server jobs are provided for MariaDB or PostgreSQL.

In case those database instances need to run pretty much continuously (within the limits of available resources and runtime maximums), the previous approach described in the recurring jobs section could fall a bit short. Recurring jobs are mainly designed for jobs that have a fixed execution time and don't reach their time limit, but need to run at given intervals (like synchronization or backup jobs, for instance).

Because a database server process will never end within the job, and will continue until the job reaches its time limit, the last resubmission command (sbatch $0) will actually never be executed, and the job won't be resubmitted.

To work around this, a possible approach is to catch a specific signal sent by the scheduler at a predefined time, before the time limit is reached, and then re-queue the job. This is easily done with the Bash trap command, which can be instructed to re-submit a job when it receives the SIGUSR1 signal.

Automatically resubmitting a job doesn't make it immediately runnable

Jobs that are automatically re-submitted using this technique won't restart right away: they will go back into the queue and stay pending until their execution conditions (priority, resources, usage limits...) are satisfied.

Persistent job example#

Here's the recurring job example from above, modified to:

  1. instruct the scheduler to send a SIGUSR1 signal to the job 90 seconds3 before reaching its time limit (with the #SBATCH --signal option),
  2. re-submit itself upon receiving that SIGUSR1 signal (with the trap command)
persistent.sbatch
#!/bin/bash
+#
+#SBATCH --job-name=persistent
+#SBATCH --dependency=singleton
+#SBATCH --time=00:05:00
+#SBATCH --signal=B:SIGUSR1@90
+
+# catch the SIGUSR1 signal
+_resubmit() {
+    ## Resubmit the job for the next execution
+    echo "$(date): job $SLURM_JOBID received SIGUSR1 at $(date), re-submitting"
+    sbatch $0
+}
+trap _resubmit SIGUSR1
+
+## Insert the command to run below. Here, we're just outputting the date every
+## 60 seconds, forever
+
+echo "$(date): job $SLURM_JOBID starting on $SLURM_NODELIST"
+while true; do
+    echo "$(date): normal execution"
+    sleep 60
+done
+

Long running processes need to run in the background

If your job's actual payload (the application or command you want to run) is running continuously for the whole duration of the job, it needs to be executed in the background, so the trap can be processed.

To run your application in the background, just add a & at the end of the command and then add a wait statement at the end of the script, to make the shell wait until the end of the job.

For instance, if you were to run a PostgreSQL database server, the while true ... done loop in the previous example could be replaced by something like this:

postgres -i -D $DB_DIR &
+wait
+

Persistent $JOBID#

One potential issue with having a persistent job re-submit itself when it reaches its runtime limit is that it will get a different $JOBID each time it's (re-)submitted.

This could be particularly challenging when other jobs depend on it, like in the database server scenario, where client jobs would need to start only if the database server is running. This can be achieved with job dependencies, but those dependencies have to be expressed using job ids, so a server job id that changes at each re-submission would be difficult to handle.

To avoid this, the re-submission command (sbatch $0) can be replaced by a re-queuing command:

scontrol requeue $SLURM_JOBID
+

The benefit of that change is that the job will keep the same $JOBID across all re-submissions. Dependencies can then be added to other jobs using that specific $JOBID, without having to worry about it changing, and there will be only one $JOBID to track for the database server job.
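
For instance, a client job could be submitted to start only after the server job has started (a sketch; the job id is taken from the example output below, and client.sbatch is a hypothetical client job script):

$ sbatch --dependency=after:31182239 client.sbatch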

The previous example can then be modified as follows:

persistent.sbatch
#!/bin/bash
+#SBATCH --job-name=persistent
+#SBATCH --dependency=singleton
+#SBATCH --time=00:05:00
+#SBATCH --signal=B:SIGUSR1@90
+
+# catch the SIGUSR1 signal
+_requeue() {
+    echo "$(date): job $SLURM_JOBID received SIGUSR1, re-queueing"
+    scontrol requeue $SLURM_JOBID
+}
+trap '_requeue' SIGUSR1
+
+## Insert the command to run below. Here, we're just outputting the date every
+## 60 seconds, forever
+
+echo "$(date): job $SLURM_JOBID starting on $SLURM_NODELIST"
+while true; do
+    echo "$(date): normal execution"
+    sleep 60
+done
+

Submitting that job will produce an output similar to this:

Mon Nov  5 10:30:59 PST 2018: Job 31182239 starting on sh-06-34
+Mon Nov  5 10:30:59 PST 2018: normal execution
+Mon Nov  5 10:31:59 PST 2018: normal execution
+Mon Nov  5 10:32:59 PST 2018: normal execution
+Mon Nov  5 10:33:59 PST 2018: normal execution
+Mon Nov  5 10:34:59 PST 2018: Job 31182239 received SIGUSR1, re-queueing
+slurmstepd: error: *** JOB 31182239 ON sh-06-34 CANCELLED AT 2018-11-05T10:35:06 DUE TO JOB REQUEUE ***
+Mon Nov  5 10:38:11 PST 2018: Job 31182239 starting on sh-06-34
+Mon Nov  5 10:38:11 PST 2018: normal execution
+Mon Nov  5 10:39:11 PST 2018: normal execution
+

The job runs for 5 minutes, then receives the SIGUSR1 signal, is re-queued, restarts for another 5 minutes, and so on, until it's properly scancelled.


  1. The dedicated partition that sh_dev uses by default only allows up to 2 cores and 8 GB of memory per user at any given time. So if you need more resources for your interactive session, you may have to specify a different partition. 

  2. Please note that your SSH session will be attached to your running job, and that resources used by that interactive shell will count towards your job's resource limits. So if you start a process using large amounts of memory via SSH while your job is running, you may hit the job's memory limits, which will trigger its termination. 

  3. Due to the resolution of event handling by the scheduler, the signal may be sent up to 60 seconds earlier than specified. 

\ No newline at end of file diff --git a/docs/user-guide/troubleshoot/index.html b/docs/user-guide/troubleshoot/index.html new file mode 100644 index 000000000..d0e5b0732 --- /dev/null +++ b/docs/user-guide/troubleshoot/index.html @@ -0,0 +1 @@ + Troubleshooting - Sherlock

Troubleshooting

Sherlock is a resource for research, and as such, it is in perpetual evolution, as hardware, applications, libraries, and modules are added, updated, and/or modified on a regular basis. Sometimes issues can appear where none existed before. When you find something missing or a behavior that seems odd, please let us know.

How to submit a support request#

Google it first!

When encountering issues with software, if the misbehavior involves an error message, the first step should always be to look up the error message online. There's a good chance somebody stumbled upon the same hurdles before, and may even provide some fix or workaround.

One of the most helpful Google searches is your_application sbatch. For example if you're having trouble submitting jobs or allocating resources (CPUs, time, memory) with Cell Ranger, search for cell ranger sbatch to see how others have successfully run your application on a cluster.

If you're facing issues you can't figure out, we're here to help. Feel free to email us at srcc-support@stanford.edu, but please keep the following points in mind to ensure a timely and relevant response to your support requests.

Please provide relevant information

We need to understand the issue you're facing, and in most cases, we need to be able to reproduce it so it can be diagnosed and addressed. Please make sure to provide enough information so we can help you in the best possible way.

This typically involves providing the following information:

  • your SUNet ID,
  • some context about your problem (were you submitting a job, copying a file, compiling an application?),
  • if relevant, the full path to the files involved in your question or problem,
  • the name of the node where you received the error (usually displayed in your command-line prompt),
  • the command(s) you ran, and/or the job submission script(s) you used,
  • the relevant job ID(s),
  • the exact, entire error message (or trace) you received.

Error messages are critical

This is very important. Without proper error messages, there is nothing we can do to help, and "it doesn't work" is not a proper error message. Also, please copy and paste the actual text of the commands, output and error messages into your tickets, rather than sending screenshots: that way, it is much easier for us to try to replicate your errors.

By providing all this information from the start, you can avoid the email back-and-forth required to gather the relevant details, and help us get to your problem immediately.

\ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 000000000..b6a02466b --- /dev/null +++ b/index.html @@ -0,0 +1,6 @@ + Sherlock
Sherlock
The HPC cluster for all your computing needs
Need to access computing resources to support your sponsored or departmental research at Stanford? You may want to try out the Sherlock cluster! Funded and supported by the Provost and Dean of Research, Sherlock is a shared computing cluster available for use by all Stanford faculty and their research teams.
More information

Services

A one-stop shop for all your scientific computing needs

compute

All the resources you need in one place: compute nodes, GPUs, large memory nodes, blazing fast interconnect, parallel filesystems, and more!

explore

Sherlock provides all the software tools and storage resources you'll need to explore and analyze your research data.

discover

With a whole range of computational tools at your fingertips, scientific breakthroughs will just be a batch job away.

In a nutshell

All about Sherlock

What is Sherlock?#

Sherlock is a shared computing cluster available for use by all Stanford Faculty members and their research teams, for sponsored or departmental faculty research. All research teams on Sherlock have access to a base set of managed computing resources, GPU-based servers, and a multi-petabyte, high-performance parallel file system for short-term storage.

Faculty can supplement these shared nodes by purchasing additional servers, and become Sherlock owners. By investing in the cluster, PI groups not only receive exclusive access to the nodes they purchase, but also get access to all of the other owner compute nodes when they're not in use, thus giving them access to the whole breadth of Sherlock resources.

Why should I use Sherlock?#

Using Sherlock for your work provides many advantages over individual solutions: hosted in an on-premises, state-of-the-art datacenter dedicated to research computing systems, the Sherlock cluster is powered and cooled by installations that are optimized for scientific computing.

On Sherlock, simulations and workloads benefit from performance levels that only large scale HPC systems can offer: high-performance I/O infrastructure, petabytes of storage, large variety of hardware configurations, GPU accelerators, centralized system administration and management provided by Stanford Research Computing.

Such features are not easily accessible at the departmental level, and often require both significant initial investments and recurring costs. Joining Sherlock allows researchers and Faculty members to avoid those costs and benefit from economies of scale, as well as to access larger, professionally managed computing resources that would not be available on an individual or even departmental basis.

How much does it cost?#

Sherlock is free to use for anyone doing departmental or sponsored research at Stanford.

Any Faculty member can request access for research purposes, and get an account with a base storage allocation and unlimited compute time on the global, shared pool of resources.

Stanford Research Computing provides faculty with the opportunity to purchase recommended compute node configurations from a catalog, for the use of their research teams. Using a traditional compute cluster condominium model, participating faculty and their teams get priority access to the resources they purchase. When those resources are idle, other owners can use them, until the purchasing owner wants to use them; when this happens, the other owners' jobs are re-queued to free up resources. Participating owner PIs also have shared access to the original base Sherlock nodes, along with everyone else.

How big is it?#

Quite big! It's actually difficult to give a definitive answer, as Sherlock is constantly evolving and expanding with new hardware additions.

As of May 2024, Sherlock features over 6,500 CPU cores available to all researchers, and more than 48,600 additional CPU cores available to Sherlock owners, faculty who have augmented the cluster with their own purchases. With a computing power of over 5.4 petaflops, Sherlock would have its place in the Top500 list of the world's most powerful computer systems.

For more details about Sherlock size and technical specifications, please refer to the tech specs section of the documentation. And for even more numbers and figures, see the Sherlock facts page.

OK, I'm sold, how do I start?#

You can request an account right now, take a look at the documentation, and drop us an email if you have any questions.

I want my own nodes!#

If you're interested in becoming an owner on Sherlock and benefiting from all the associated advantages, please take a look at the catalog of configurations, feel free to use the ordering form to submit your request, and we'll get back to you.

\ No newline at end of file diff --git a/ondemand/index.html b/ondemand/index.html new file mode 100644 index 000000000..de5e416c7 --- /dev/null +++ b/ondemand/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/order/index.html b/order/index.html new file mode 100644 index 000000000..57f07d053 --- /dev/null +++ b/order/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 000000000..876585bd4 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-,:!=\\[\\]()\"/]+|\\.(?!\\d)|&[lg]t;|(?!\\b)(?=[A-Z][a-z])","pipeline":["stopWordFilter"],"fields":{"title":{"boost":1000.0},"text":{"boost":1.0},"tags":{"boost":1000000.0}}},"docs":[{"location":"","title":"Sherlock","text":""},{"location":"#what-is-sherlock","title":"What is Sherlock?","text":"

Sherlock is a shared computing cluster available for use by all Stanford Faculty members and their research teams, for sponsored or departmental faculty research. All research teams on Sherlock have access to a base set of managed computing resources, GPU-based servers, and a multi-petabyte, high-performance parallel file system for short-term storage.

Faculty can supplement these shared nodes by purchasing additional servers, and become Sherlock owners. By investing in the cluster, PI groups not only receive exclusive access to the nodes they purchase, but also get access to all of the other owner compute nodes when they're not in use, thus giving them access to the whole breadth of Sherlock resources.

"},{"location":"#why-should-i-use-sherlock","title":"Why should I use Sherlock?","text":"

Using Sherlock for your work provides many advantages over individual solutions: hosted in an on-premises, state-of-the-art datacenter dedicated to research computing systems, the Sherlock cluster is powered and cooled by installations that are optimized for scientific computing.

On Sherlock, simulations and workloads benefit from performance levels that only large scale HPC systems can offer: high-performance I/O infrastructure, petabytes of storage, large variety of hardware configurations, GPU accelerators, centralized system administration and management provided by Stanford Research Computing.

Such features are not easily accessible at the departmental level, and often require both significant initial investments and recurring costs. Joining Sherlock allows researchers and Faculty members to avoid those costs and benefit from economies of scale, as well as to access larger, professionally managed computing resources that what would not be available on an individual or even departmental basis.

"},{"location":"#how-much-does-it-cost","title":"How much does it cost?","text":"

Sherlock is free to use for anyone doing departmental or sponsored research at Stanford.

Any Faculty member can request access for research purposes, and get an account with a base storage allocation and unlimited compute time on the global, shared pool of resources.

Stanford Research Computing provides faculty with the opportunity to purchase from a catalog a recommended compute node configurations, for the use of their research teams. Using a traditional compute cluster condominium model, participating faculty and their teams get priority access to the resources they purchase. When those resources are idle, other \"owners\" can use them, until the purchasing owner wants to use them. When this happens, those other owners jobs are re-queued to free up resources. Participating owner PIs also have shared access to the original base Sherlock nodes, along with everyone else.

"},{"location":"#how-big-is-it","title":"How big is it?","text":"

Quite big! It's actually difficult to give a definitive answer, as Sherlock is constantly evolving and expanding with new hardware additions.

As of May 2024, Sherlock features over 6,500 CPU cores available to all researchers, and more than 48,600 additional CPU cores available to Sherlock owners, faculty who have augmented the cluster with their own purchases. With a computing power over 5.4 Petaflops, Sherlock would have its place in the Top500 list of the 500 most powerful computer systems in the world.

For more details about Sherlock size and technical specifications, please refer to the tech specs section of the documentation. And for even more numbers and figures, see the Sherlock facts page.

"},{"location":"#ok-im-sold-how-do-i-start","title":"OK, I'm sold, how do I start?","text":"

You can request an account right now, take a look at the documentation, and drop us an email if you have any questions.

"},{"location":"#i-want-my-own-nodes","title":"I want my own nodes!","text":"

If you're interested in becoming an owner on Sherlock, and benefit from all the advantages associated, please take a look at the catalog of configurations, feel free to use the ordering form to submit your request, and we'll get back to you.

"},{"location":"docs/","title":"Sherlock documentation","text":""},{"location":"docs/#welcome-to-sherlock","title":"Welcome to Sherlock!","text":"

Sherlock is a High-Performance Computing (HPC) cluster, operated by the Stanford Research Computing Center to provide computing resources to the Stanford community at large. You'll find all the documentation, tips, FAQs and information about Sherlock among these pages.

"},{"location":"docs/#why-use-sherlock","title":"Why use Sherlock?","text":"

Using Sherlock for your work provides many advantages over individual solutions: hosted in an on-premises, state-of-the-art datacenter, the Sherlock cluster is powered and cooled by installations that are optimized for scientific computing.

On Sherlock, simulations and workloads benefit from performance levels that only large scale HPC systems can offer: high-performance I/O infrastructure, petabytes of storage, a large variety of hardware configurations, GPU accelerators, and centralized system administration and management provided by Stanford Research Computing.

Such features are not easily accessible at the departmental level, and often require both significant initial investments and recurring costs. Joining Sherlock allows researchers and faculty members to avoid those costs, benefit from economies of scale, and access larger, professionally managed computing resources that would not be available on an individual or even departmental basis.

"},{"location":"docs/#how-much-does-it-cost","title":"How much does it cost?","text":"

Sherlock is free to use for anyone doing departmental or sponsored research at Stanford. Any faculty member can request access for research purposes, and get an account with a base storage allocation and unlimited compute time on the global, shared pool of resources.

No CPU.hour charge

Unlike all Cloud Service Providers and many HPC systems, there is no usage charge on Sherlock.

When you submit your work on Sherlock, you don't need to keep an eye on the clock and worry about how much that run will cost you. There is no limit on the total amount of computing you can run on the cluster, as long as resources are available, and there's no charge to use them, no matter how large or small your computations are.

In case those free resources are not sufficient, Stanford Research Computing offers Faculty members the opportunity to invest into the cluster, and get access to additional computing resources for their research teams. Using a traditional compute cluster condominium model, participating faculty and their teams get priority access to the resources they purchase. When they're idle, those resources are available to use by other owners on the cluster, giving them access to virtually unlimited resources.

"},{"location":"docs/#information-sources","title":"Information sources","text":"

Searching the docs

If you're looking for information on a specific topic, the Search feature of this site will allow you to quickly find the page you're looking for. Just press S, F or / to open the Search bar and start typing.

To help users take their first steps on Sherlock, we provide documentation and information through various channels:

Channel | URL | Purpose
Documentation (you are here) | www.sherlock.stanford.edu/docs | information to help new users start on Sherlock, and more in-depth documentation for users already familiar with the environment
Changelog | news.sherlock.stanford.edu | announcements, news and updates about Sherlock
Dashboard | status.sherlock.stanford.edu | status of Sherlock's main components and services, outages, planned maintenance

To get started, you can take a look at the concepts and glossary pages to get familiar with the terminology used throughout the documentation pages. Then, we recommend going through the following sections:

  • Prerequisites
  • Connecting to the cluster
  • Submitting jobs
"},{"location":"docs/#acknowledgment-citation","title":"Acknowledgment / citation","text":"

It is important and expected that publications resulting from computations performed on Sherlock acknowledge the use of Sherlock resources. The following wording is suggested:

Acknowledgment

Some of the computing for this project was performed on the Sherlock cluster. We would like to thank Stanford University and the Stanford Research Computing Center for providing computational resources and support that contributed to these research results.

"},{"location":"docs/#support","title":"Support","text":""},{"location":"docs/#email-recommended","title":"Email (recommended)","text":"

Research Computing support can be reached by sending an email to srcc-support@stanford.edu and mentioning Sherlock.

How to submit effective support requests

To ensure a timely and relevant response, please make sure to include some additional details, such as job ids, commands executed and error messages received, so we can help you better. For more details, see the Troubleshooting page.
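
As a concrete sketch (12345 is a hypothetical job id), Slurm's accounting tools can help you collect those details before writing in:

$ sacct -j 12345 -o JobID,JobName,Partition,State,ExitCode,Elapsed\n

Including that kind of output, along with the exact command you ran, usually speeds up diagnosis considerably.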

As a member of the Sherlock community, you're also automatically subscribed to the sherlock-announce mailing-list, which is only used by the Stanford Research Computing team to send important announcements about Sherlock.

"},{"location":"docs/#onboarding-sessions","title":"Onboarding sessions","text":"

We offer regular onboarding sessions for new Sherlock users.

On-boarding session times

On-boarding sessions are offered on the first Wednesday of every month, 1PM-2PM PST, via Zoom.

These one-hour sessions are a brief introduction to Sherlock's layout, its scheduler, the different file systems available on the cluster, as well as some job submission and software installation best practices for new users. They are a good intro course if you are new to Sherlock or HPC in general.

If you can't attend the live on-boarding sessions, you can still take a look at the on-boarding slides, as well as this session recording.

"},{"location":"docs/#office-hours","title":"Office hours","text":"

Sending a question to srcc-support@stanford.edu is always the best first option for questions. That way you can include detailed descriptions of the problem or question, valuable output and error messages and any steps you took when you encountered your error. Also, everyone on our team will see your ticket, enabling the most appropriate group member to respond.

Office hours are a good place for more generalized questions about Sherlock, Slurm, Linux usage, data storage, queue structures/scheduling, job optimization and general capabilities of Sherlock. They are also useful for more technically nuanced questions that may not be easily answered through our ticketing system. In office hours, some problems can indeed be solved quickly, or enough progress can be made that you can then work towards a solution on your own.

COVID-19 update

We'll be holding remote office hours via Zoom, for the time being.

Office hours times

Click here to join the Sherlock Office Hours Zoom

  • Tuesday 10-11am
  • Thursday 3-4pm

You'll need a full-service SUNet ID (basically, a @stanford.edu email address) in order to authenticate and join Office Hours via Zoom. If you do not have a full service account, please contact us at srcc-support@stanford.edu.

If you can't make any of the Office Hours sessions, you can also make an appointment with Sherlock's support team.

"},{"location":"docs/#what-to-expect","title":"What to expect","text":"
  • We cannot accommodate walk-ins: we're unfortunately not staffed to welcome unscheduled visits, so please make sure that you're planning to stop by during office hours. We will not be able to help you otherwise.

  • We can rarely help with application-specific or algorithm problems.

  • You should plan your projects sufficiently in advance and not come to office hours at the last minute before a deadline. Sherlock is a busy resource with several thousand users and you should not expect your jobs to complete before a given date.

  • Not all questions and problems can be answered or solved during office hours, especially ones involving hardware, filesystem or network issues. Sherlock features several thousand computing, networking and storage components that are constantly monitored by our team. You can be sure that when Sherlock has an issue, we are aware of it and working on it.

"},{"location":"docs/#user-community","title":"User community","text":"

Sherlock is present on the Stanford Slack Grid, and you're more than welcome to join the following channels:

  • #sherlock-announce, for announcements related to Sherlock and its surrounding services,
  • #sherlock-users, as a place for Sherlock users to connect directly with each other. If you have general questions about Sherlock, want to reach out to other Sherlock users to share tips, good practices, tutorials or other info, please feel free to do so there.

For more details about the SRCC Slack Workspace, and instructions on how to join this workspace and its channels, please see the Stanford Research Computing support page.

Slack is not an official support channel

Please note that while Stanford Research Computing staff will monitor these channels, the official way to get support is still to email us at srcc-support@stanford.edu.

"},{"location":"docs/#quick-start","title":"Quick Start","text":"

If you're in a rush1, here's a 3-step ultra-quick start:

  1. connect to Sherlock
$ ssh login.sherlock.stanford.edu\n
  2. get an interactive session on a compute node
[kilian@sh-ln01 login! ~]$ sh_dev\n
  3. run a command
[kilian@sh02-01n58 ~]$ module load python\n[kilian@sh02-01n58 ~]$ python -c \"print('Hello Sherlock')\"\nHello Sherlock\n

Congrats! You ran your first job on Sherlock!

"},{"location":"docs/#replay","title":"Replay","text":"

Here's what it looks like in motion:

  1. even in a rush, you'll still need an account on the cluster. See the Prerequisites page for details.\u00a0\u21a9

"},{"location":"docs/concepts/","title":"Concepts","text":""},{"location":"docs/concepts/#sherlock-a-shared-resource","title":"Sherlock, a shared resource","text":"

Sherlock is a shared compute cluster available for use by all Stanford faculty members and their research teams to support departmental or sponsored research.

Sherlock is a resource for research

Sherlock is not suitable for course work, class assignments or general-use training sessions.

Users interested in using computing resources in such contexts are encouraged to investigate FarmShare, Stanford\u2019s community computing environment, which is primarily intended for supporting coursework.

Sherlock is open to the Stanford community as a computing resource to support departmental or sponsored research, so a faculty member's sponsorship is required for all user accounts.

Usage policy

Please note that your use of this system falls under the \"Computer and Network Usage Policy\", as described in the Stanford Administrative Guide. In particular, sharing authentication credentials is strictly prohibited. Violation of this policy will result in termination of access to Sherlock.

Sherlock is designed, deployed, maintained and operated by Stanford Research Computing staff. Stanford Research Computing is a joint effort of the Dean of Research and IT Services to build and support a comprehensive program to advance computational research at Stanford.

Sherlock was initially purchased and supported with seed funding from Stanford's Provost. It comprises a set of freely available compute nodes, a few specific resources such as large-memory machines and GPU servers, as well as the associated networking equipment and storage. These resources can be used to run computational codes and programs, and are managed through a job scheduler using a fair-share algorithm.

"},{"location":"docs/concepts/#data-risk-classification","title":"Data risk classification","text":"

Low and Moderate Risk data

Sherlock is approved for computing with Low and Moderate Risk data only.

High Risk data

Sherlock is NOT approved to store or process HIPAA, PHI, PII nor any kind of High Risk data. The system is approved for computing with Low and Moderate Risk data only, and is not suitable to process High Risk data.

Users are responsible for ensuring the compliance of their own data.

For more information about data risk classifications, see the Information Security Risk Classification page.

"},{"location":"docs/concepts/#investing-in-sherlock","title":"Investing in Sherlock","text":"

For users who need more than casual access to a shared computing environment, Sherlock also offers Faculty members the possibility to invest in additional, dedicated computing resources.

Unlike traditional clusters, Sherlock is a collaborative system where the majority of nodes are purchased and shared by the cluster users. When a user (typically a PI) purchases one or more nodes, they become an owner. Owners choose from a standard set of server configurations supported by Stanford Research Computing (known as the Sherlock catalog) to add to the cluster.

When they're not in use, PI-purchased compute nodes can be used by other owners. This model also allows Sherlock owners to benefit from the scale of the cluster by giving them access to more compute nodes than their individual purchase, which gives them much greater flexibility than owning a standalone cluster.

The majority of Sherlock nodes are owners nodes

The vast majority of Sherlock's compute nodes have been purchased by individual PIs and groups, and PI purchases are the main driver behind the rapid expansion of the cluster, which went from 120 nodes to more than 1,000 nodes in less than 3 years.

The resource scheduler configuration works like this:

  • owners and their research teams get immediate and exclusive access to the resources they purchased,
  • when those nodes are idle, other owners can use them,
  • when the purchasing owners want to use their resources, jobs from other owners that may be running on them are preempted (i.e. killed and re-queued).

This provides a way to get more resources to run lower-priority jobs in the background, while making sure that owners always get immediate access to their own nodes.

Participating owners also have shared access to the public, shared Sherlock nodes, along with everyone else.

"},{"location":"docs/concepts/#benefits","title":"Benefits","text":"

Benefits to owners include:

  • no wait time in queue: immediate and exclusive access to the purchased nodes
  • access to more resources: possibility to submit jobs to the other owners' nodes when they're not in use

Compared to hosting and managing computing resources on your own, purchasing nodes on Sherlock provides:

  • data center hosting, including backup power and cooling
  • system configuration, maintenance and administration
  • hardware diagnostics and repairs

Those benefits come in addition to the other Sherlock advantages:

  • access to high-performance, large parallel scratch storage space
  • access to snapshotted, replicated, enterprise-class storage space
  • optimized software stack, especially tailored for a range of research needs
  • tools to build and install additional software applications as needed
  • user support
"},{"location":"docs/concepts/#limitations","title":"Limitations","text":"

Purchasing nodes on Sherlock is different from traditional server hosting.

In particular, purchasing your own compute nodes on Sherlock will NOT allow:

root access: owner nodes on Sherlock are still managed by Stanford Research Computing staff in accordance with Stanford's Minimum Security Standards. Although users are welcome to install (or request) any software they may need, purchasing compute nodes on Sherlock does not allow root access to the nodes.

running permanent services: permanent processes such as web servers or databases can only run on owner nodes through the scheduler, using recurring or persistent jobs. Purchasing compute nodes on Sherlock does not provide a way to run anything that couldn't run on freely-available nodes.

direct network connectivity: owners' nodes are connected to Sherlock's internal network and are not directly accessible from the outside, which means that they can't host public services like web or application servers.

bypassing the scheduler: jobs running on owners' nodes still need to be submitted to the scheduler. Direct shell access to the nodes is not possible outside of scheduled interactive sessions.

hardware changes: the hardware components of purchased nodes cannot be modified, removed, swapped or upgraded during the nodes' service lifetime.

configuration: the configuration of purchased nodes is tuned to provide optimal performance over a majority of use cases and applications, is identical on all nodes across the cluster, and cannot be changed, modified or altered in any way.

persistent local storage: local storage space provided on the compute nodes is only usable for the duration of a job and cannot be used to store long-term data.

additional storage space: purchasing compute nodes on Sherlock does not provide additional storage space. Please note that Stanford Research Computing does offer the possibility for PIs to purchase their own storage space on Oak, for their long-term research data needs.

"},{"location":"docs/concepts/#purchasing-nodes","title":"Purchasing nodes","text":"

If you are interested in becoming an owner, you can find the latest information about ordering Sherlock nodes on the ordering page. Feel free to contact us if you have any additional questions.

"},{"location":"docs/concepts/#cluster-generations","title":"Cluster generations","text":"

The research computing landscape evolves very quickly, and to accommodate both growth and technological advances, it's necessary to adapt the Sherlock environment to these evolutions.

Every year or so, a new generation of processors is released, which is why, over a span of several years, multiple generations of CPUs and GPUs make their way into Sherlock. This provides users with access to the latest features and performance enhancements, but it also adds some heterogeneity to the cluster, which is important to keep in mind when compiling software and requesting resources to run them.

Another key component of Sherlock is the interconnect network that links all of Sherlock's compute nodes together and acts as the backbone of the whole cluster. This network fabric is of finite capacity: based on the characteristics of the individual networking switches and on typical research computing workflows, it can accommodate up to about 850 compute nodes.

As nodes get added to Sherlock, the number of available ports decreases, and at some point, the fabric gets full and no more nodes can be added. Sherlock reached that stage for the first time in late 2016, which prompted the installation of a whole new fabric, to allow for further system expansion.

This kind of evolution is the perfect opportunity to upgrade other components too: management software, ancillary services architecture and user applications. In January 2017, those components were completely overhauled and a new, separate cluster was kick-started, using a different set of hardware and software, while conserving the same storage infrastructure, to ease the transition process.

After a transition period, the older Sherlock hardware, compute and login nodes, was merged into the new cluster, and from a logical perspective (connection, job scheduling and computing resources), nodes attached to each of the fabrics were reunited to form a single cluster again.

As Sherlock continues to evolve and grow, the new fabric will also approach capacity again, and the same process will happen again to start the next generation of Sherlock.

"},{"location":"docs/concepts/#maintenances-and-upgrades","title":"Maintenances and upgrades","text":"

Stanford Research Computing institutes a monthly scheduled maintenance window on Sherlock, to ensure optimal operation, avoid potential issues and prepare for future expansions. This window will be used to make hardware repairs, software and firmware updates, and perform general manufacturer recommended maintenance on our environment.

As often as possible, maintenance tasks are performed in a rolling, non-disruptive fashion, but downtimes are sometimes an unfortunate necessity to allow disruptive operations that can't be conducted while users are working on the system.

Maintenance schedule

As often as possible, maintenances will take place on the first Tuesday of every month, from 08:00 to 12:00 Pacific time (noon), and will be announced 2 weeks in advance, through the usual communication channels.

In case an exceptional amount of work is required, the maintenance window could be extended to 10 hours (from 08:00 to 18:00).

During these times, access to Sherlock will be unavailable, logins will be disabled and jobs won't run. A reservation will be placed in the scheduler so running jobs can finish before the maintenance starts, and jobs that wouldn't finish in time will be postponed until after the maintenance window.

"},{"location":"docs/concepts/#common-questions","title":"Common questions","text":"

Q: Why perform maintenances at all?

A: Due to the scale of our computing environment and the increasing complexity of the systems we deploy, it is prudent to arrange for a regular time when we can comfortably and without pressure fix problems or update facilities with minimal impact to our customers. Most, if not all, major HPC centers have regular maintenance schedules. We also need to enforce the Minimum Security rules instituted by the Stanford Information Security Office, which mandate deployment of security patches in a timely manner.

Q: Why Tuesdays 08:00-12:00? Why not do this late at night?

A: We have observed that the least busy time for our services is at the beginning of the week, in the morning hours. Using this time period should not interrupt most of our users. In the unlikely event that a problem extends past the scheduled downtime, we would have our full staff fresh and available to assist in repairs and quickly restore service.

Q: I have jobs running, what will happen to them?

A: For long-running jobs, we strongly recommend checkpointing your results on a periodic basis. Besides, we will place a reservation in the scheduler for each maintenance, to prevent jobs from running past it. This means that the scheduler will only allow jobs to run if they can finish by the time the maintenance starts. If you submit a long job shortly before the maintenance, it will be delayed until after the maintenance. This ensures that no work is lost when the maintenance starts.
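
If you're curious whether such a maintenance reservation is already in place, standard Slurm tooling can show it (a sketch; the exact output will vary):

$ scontrol show reservation\n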

"},{"location":"docs/credits/","title":"About us","text":""},{"location":"docs/credits/#stanford-research-computing","title":"Stanford Research Computing","text":"

Stanford Research Computing is a joint effort of the Dean of Research and IT Services to build and support a comprehensive program to advance computational research at Stanford. That includes offering and supporting traditional high performance computing (HPC) systems, as well as systems for high throughput and data-intensive computing.

The Stanford Research Computing team also helps researchers transition their analyses and models from the desktop to more capable and plentiful resources, providing the opportunity to explore their data and answer research questions at a scale typically not possible on desktops or departmental servers. Partnering with national initiatives and programs, as well as vendors, Stanford Research Computing offers training and learning opportunities around HPC tools and technologies.

For more information, please see the Stanford Research Computing website.

"},{"location":"docs/credits/#credits","title":"Credits","text":"

We would like to thank the following companies for their generous sponsorship, and for providing services and resources that help us manage Sherlock every day:

  • GitHub
  • Hund
  • Noticeable

The Sherlock website and documentation also rely on the following projects:

  • MkDocs
  • Material for MkDocs
"},{"location":"docs/credits/#why-the-sherlock-name","title":"Why the Sherlock name?","text":"

If you're curious about where the Sherlock name came from, we always considered that computing resources in general and HPC clusters in particular should be the catalyst of innovation, be ahead of their time, and spur new discoveries.

And what better account of what's happening on a high-performance computing cluster than Benedict Cumberbatch describing his role as Sherlock Holmes in the BBC's modern adaptation of Arthur Conan Doyle's classic?

Benedict Cumberbatch, about Sherlock

There's a great charge you get from playing him, because of the volume of words in your head and the speed of thought \u2013 you really have to make your connections incredibly fast. He is one step ahead of the audience, and of anyone around him with normal intellect. They can't quite fathom where his leaps are taking him.

Yes, exactly. That's Sherlock.

"},{"location":"docs/credits/#sherlock-of-hbo-fame","title":"Sherlock, of HBO fame","text":"

And finally, we couldn't resist the pleasure of citing the most prestigious accomplishment of Sherlock to date: a mention in HBO's Silicon Valley Season 4 finale!

Yep, you got that right, Richard Hendricks wanted to use our very own Sherlock!

Kudos to the show's crew, and a big thank you to HBO's data compression stars, Professor Tsachy Weissman and Dmitri Pavlichin, for this incredible Sherlock shout-out. This has been an everlasting source of pride and amazement for the whole SRCC team!

"},{"location":"docs/glossary/","title":"Glossary","text":""},{"location":"docs/glossary/#whats-a-cluster","title":"What's a cluster?","text":"

A computing cluster is a federation of multiple compute nodes (independent computers), most commonly linked together through a high-performance interconnect network.

What makes it a \"super-computer\" is the ability for a program to address resources (such as memory, CPU cores) located in different compute nodes, through the high-performance interconnect network.

On a computing cluster, users typically connect to login nodes, using a secure remote login protocol such as SSH. Unlike in traditional interactive environments, users then need to prepare compute jobs to submit to a resource scheduler. Based on a set of rules and limits, the scheduler will then try to match the jobs' resource requirements with available resources such as CPUs, memory or computing accelerators such as GPUs. It will then execute the user-defined tasks on the selected resources, and generate output files in one of the different storage locations available on the cluster, for the user to review and analyze.

"},{"location":"docs/glossary/#cluster-components","title":"Cluster components","text":"

The terms typically used to describe cluster components can be confusing, so in an effort to clarify things, here's a schema of the most important ones, and their definitions.

"},{"location":"docs/glossary/#cpu","title":"CPU","text":"A Central Processing Unit (CPU), or core, or CPU core, is the smallest unit in a microprocessor that can carry out computational tasks, that is, run programs. Modern processors typically have multiple cores."},{"location":"docs/glossary/#socket","title":"Socket","text":"A socket is the connector that houses the microprocessor. By extension, it represents the physical package of a processor, that typically contains multiple cores."},{"location":"docs/glossary/#node","title":"Node","text":"A node is a physical, stand-alone computer, that can handle computing tasks and run jobs. It's connected to other compute nodes via a fast network interconnect, and contains CPUs, memory and devices managed by an operating system."},{"location":"docs/glossary/#cluster","title":"Cluster","text":"A cluster is the complete collection of nodes with networking and file storage facilities. It's usually a group of independent computers connected via a fast network interconnect, managed by a resource manager, which acts as a large parallel computer."},{"location":"docs/glossary/#other-commonly-used-terms","title":"Other commonly used terms","text":"

To make this documentation more accessible, we try to explain key terms in a non-technical way. When reading these pages, please keep in mind the following definitions, presented in alphabetical order:

"},{"location":"docs/glossary/#application","title":"Application","text":"An application is a computer program designed to perform a group of coordinated functions, tasks, or activities for the benefit of the user. In the context of scientific computing, an application typically performs computations related to a scientific goal (molecular dynamics simulations, genome assembly, compuational fluid dynamics simulations, etc)."},{"location":"docs/glossary/#backfill","title":"Backfill","text":"Backfill scheduling is a method that a scheduler can use in order to maximize utilization. It allows smaller (both in terms of size and time requirements), lower priority jobs to start before larger, higher priority ones, as long as doing so doesn't push back the higher-priority jobs expected start time."},{"location":"docs/glossary/#executable","title":"Executable","text":"A binary (or executable) program refers to the machine-code compiled version of an application. This is which is a binary file that a computer can execute directly. As opposed to the application source code, which is the human-readable version of the application internal instructions, and which needs to be compiled by a compiler to produce the executable binary."},{"location":"docs/glossary/#fairshare","title":"Fairshare","text":"A resource scheduler ranks jobs by priority for execution. Each job's priority in queue is determined by multiple factors, among which one being the user's fairshare score. A user's fairshare score is computed based on a target (the given portion of the resources that this user should be able to use) and the user's effetive usage, ie the amount of resources (s)he effectively used in the past. As a result, the more resources past jobs have used, the lower the priority of the next jobs will be. Past usage is computed based on a sliding window and progressively forgotten over time. This enables all users on a shared resource to get a fair portion of it for their own use, by giving higher priority to users who have been underserved in the past."},{"location":"docs/glossary/#flops","title":"FLOPS","text":"Floating-point Operations Per Second (FLOPS) are a measure of computing performance, and represent the number of floating-point operations that a CPU can perform each second. Modern CPUs and GPUs are capable of doing TeraFLOPS (10^12 floating-point operations per second), depending on the precision of those operations (half-precision: 16 bits, single-precision: 32 bits, double-precision: 64 bits)."},{"location":"docs/glossary/#gpu","title":"GPU","text":"A Graphical Processing Unit (GPU) is a specialized device initially designed to generate graphical output. On modern computing architecture, they are used to accelerate certain types of computation, which they are much faster than CPUs at. GPUs have their own memory, and are attached to CPUs, within a node. Each compute node can host one or more GPUs."},{"location":"docs/glossary/#hpc","title":"HPC","text":"High Performance Computing (HPC) refers to the practice of aggregating computing power to achieve higher performance that would be possible by using a typical computer."},{"location":"docs/glossary/#infiniband","title":"Infiniband","text":"Infiniband is a networking standard that features high bandwidth and low latency. The current Infiniband devices are capable of transferring data at up to 200 Gbits/sec with less than a microsecond latency. 
As of this writing, the popular Infiniband versions are HDR (High Data Rate) with 200 Gbits/sec and EDR (Enhanced Data Rate) with 100 Gbits/sec."},{"location":"docs/glossary/#iops","title":"IOPS","text":"Input/output operations per second (IOPS, pronounced eye-ops) is an input/output performance measurement used to characterize computer storage system performance."},{"location":"docs/glossary/#job","title":"Job","text":"A job, or batch job, is the scheduler's base unit of computing by which resources are allocated to a user for a specified amount of time. Users create job submission scripts to ask the scheduler for resources such as cores, memory, runtime, etc. The scheduler puts the requests in a queue and allocates requested resources based on jobs' priority."},{"location":"docs/glossary/#job-step","title":"Job step","text":"Job steps are sets of (possibly parallel) tasks within a job."},{"location":"docs/glossary/#login-nodes","title":"Login nodes","text":"

Login nodes are points of access to a compute cluster. Users usually connect to login nodes via SSH to compile and debug their code, review their results, do some simple tests, and submit their batch jobs to the parallel computer.

Login nodes are not for computing

Login nodes are usually shared among many users and therefore must not be used to run computationally intensive tasks. Those should be submitted to the scheduler, which will dispatch them to compute nodes.
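
For example, rather than running a computation directly on a login node, a single command (./app is a hypothetical executable here) can be handed to the scheduler, which wraps it in a minimal batch script and runs it on a compute node:

$ sbatch --wrap=\"./app\"\n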

"},{"location":"docs/glossary/#modules","title":"Modules","text":"Environment modules, or software modules, are a type of software management tool used on in most HPC environments. Using modules enable users to selectively pick the software that they want to use and add them to their environment. This allows to switch between different versions or flavors of the same software, pick compilers, libraries and software components and avoid conflicts between them."},{"location":"docs/glossary/#mpi","title":"MPI","text":"Message Passing Interface (MPI) is a standardized and portable message-passing system designed to exchange information between processes running on different nodes. There are several implementations of the MPI standard, which is the most common way used to scale parallel applications beyond a single compute node."},{"location":"docs/glossary/#openmp","title":"OpenMP","text":"Open Multi Processing (OpenMP) is a parallel programming model designed for shared memory architecture. It's based on pragmas that can be added in applications to let the compiler generate a code that can run on multiple cores, within the same node."},{"location":"docs/glossary/#partition","title":"Partition","text":"

A partition is a set of compute nodes within a cluster with a common feature. For example, compute nodes with GPUs, or compute nodes belonging to the same owner, could form a partition.

On Sherlock, you can see detailed partition information with the sh_part or sinfo commands.
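
For instance, a quick per-partition summary can be obtained like this (a sketch using standard sinfo format options; the partition name is just an example):

$ sh_part\n$ sinfo -p normal -o \"%P %a %l %D %c %m %G\"\n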

"},{"location":"docs/glossary/#qos","title":"QOS","text":"A Quality Of Service (QOS) is the set of rules and limitations that apply to a categories of job. The combination of a partition (set of machines where a job can run) and QOS (set of rules that applies to that job) makes what is often referred to as a scheduler queue."},{"location":"docs/glossary/#run-time","title":"Run time","text":"The run time, or walltime, of a job is the time required to finish its execution."},{"location":"docs/glossary/#scheduler","title":"Scheduler","text":"The goal of a job scheduler is to find the appropriate resources to run a set of computational tasks in the most efficient manner. Based on resource requirements and job descriptions, it will prioritize those jobs, allocate resources (nodes, CPUs, memory) and schedule their execution."},{"location":"docs/glossary/#slurm","title":"Slurm","text":"Simple Linux Utility for Resource Management (SLURM) is a software that manages computing resources and schedule tasks on them. Slurm coordinates running of many programs on a shared facility and makes sure that resources are used in an optimal manner."},{"location":"docs/glossary/#ssh","title":"SSH","text":"Secure Shell (SSH) is a protocol to securely access remote computers. Based on the client-server model, multiple users with an SSH client can access a remote computer. Some operating systems such as Linux and Mac OS have a built-in SSH client and others can use one of many publicly available clients."},{"location":"docs/glossary/#thread","title":"Thread","text":"A process, in the simplest terms, is an executing program. One or more threads run in the context of the process. A thread is the basic unit to which the operating system allocates processor time. A thread can execute any part of the process code, including parts currently being executed by another thread. Threads are co-located on the same node."},{"location":"docs/glossary/#task","title":"Task","text":"In the Slurm context, a task is to be understood as a process. A multi-process program is made of several tasks. A task is typically used to schedule a MPI process, that in turn can use several CPUs. By contrast, a multi-threaded program is composed of only one task, which uses several CPUs."},{"location":"docs/orders/","title":"Ordering nodes on Sherlock","text":"

For research groups needing access to additional, dedicated computing resources on Sherlock, we offer the possibility for PIs to purchase their own compute nodes to add to the cluster.

Operating costs for managing and housing PI-purchased compute nodes are waived in exchange for letting other users make use of any idle compute cycles on the PI-owned nodes. Owners have priority access to the computing resources they purchase, but can access more nodes for their research if they need to. This provides the PI with much greater flexibility than owning a standalone cluster.

"},{"location":"docs/orders/#conditions","title":"Conditions","text":""},{"location":"docs/orders/#service-term","title":"Service term","text":"

Compute nodes are purchased for a duration of 4 years

Compute nodes are purchased and maintained based on a 4-year lifecycle, which is the duration of the equipment warranty and vendor support.

Owners will be notified during the 4th year that their nodes' lifetime is about to reach its term, at which point they'll be welcome to either:

  • renew their investment by purchasing new nodes,
  • continue to use the public portion of Sherlock's resources.

At the end of their service term, compute nodes are physically retired from the cluster, to make room for new equipment. Compute nodes may be kept running for an additional year at most after the end of their service term, while PIs plan for equipment refresh. Nodes failing during this period may not be repaired, and failed hardware will be disabled or removed from the system.

Please note that outside of exceptional circumstances, nodes purchased in Sherlock cannot be removed from the cluster before the end of their service term.

"},{"location":"docs/orders/#shared-ownership","title":"Shared ownership","text":"

Minimum order of one node per PI

The number of nodes in a shared order must be greater than or equal to the number of purchasing PI groups.

For operational and administrative as well as usability reasons, we do not support shared ownership of equipment, meaning that multiple PI groups cannot purchase and share a single compute node. Shared orders have a minimum of one node per purchasing PI group.

"},{"location":"docs/orders/#compute-nodes-catalog","title":"Compute nodes catalog","text":"

Stanford Research Computing offers a select number of compute node configurations that have been tested and validated on Sherlock and that aim to cover most computing needs.

Sherlock catalog

Complete details are available in the Sherlock compute nodes catalog 3

"},{"location":"docs/orders/#configurations","title":"Configurations","text":"

We try to provide hardware configurations that can cover the needs and requirements of a wide range of computing applications, in various scientific fields, and to propose a spectrum of pricing tiers, as shown in the table below:

Type | Description | Recommended usage | Price range
CBASE | Base configuration | Best per-core performance for serial applications, multi-threaded (OpenMP) and distributed (MPI) applications. Most flexible and cost-effective configuration | $
CPERF | High-core count configuration | Multi-threaded applications requiring higher numbers of CPU cores | $$
CBIGMEM | Large-memory configuration | Serial or multi-threaded applications requiring terabytes of memory (genome assembly, etc...) | $$$$
G4FP32 | Base GPU configuration | Single-precision (FP32) GPU-accelerated applications (CryoEM, MD...) with low GPU memory requirements | $$
G4FP64 | HPC GPU configuration | AI, ML/DL and GPU-accelerated HPC codes requiring double-precision (FP64) and larger amounts of GPU memory | $$$
G4TF64, G8TF64 | Best-in-class GPU configuration | AI, ML/DL and GPU-accelerated HPC codes requiring double-precision (FP64), large amounts of GPU memory, and heavy multi-GPU scaling | $$$$

Choosing the best node configuration for your needs

Although some configurations may appear cheaper when looking at the dollar/core ratio, this is not the only point to consider when determining the best configuration for your workload.

Performance per core

There are other factors to take into account, notably the memory and I/O bandwidth per core, which could be lower on higher core-count configurations like CPERF. With several times more cores than CBASE nodes, they still provide the same total amount of bandwidth to remote and local storage, as well as, to a lesser extent, to memory. Higher core-count CPUs also often run at lower core frequencies, which, combined with less bandwidth per core, may result in lower performance for serial jobs.

CPERF nodes are an excellent fit for multi-threaded applications that don't span multiple nodes. But for more diverse workloads, they don't offer the same level of flexibility as CBASE nodes, which can run a mix of serial, multi-threaded and MPI applications equally well.

Resource availability

Another important factor to take into account is that fewer nodes for a given number of cores offer less resilience against potential hardware failures: if a 128-core node becomes unavailable for some reason, that's 128 cores that nobody can use while the node is being repaired. But with 128 cores spread across 4x 32-core nodes, if one node fails, there are still 96 cores that can be used.

We'll be happy to help you determine the best configuration for your computing needs, so feel free to reach out to schedule a consultation.

Configuration details for the different compute node types are listed in the Sherlock compute nodes catalog 3

"},{"location":"docs/orders/#prices","title":"Prices","text":"

Prices for the different compute node types are listed in the Sherlock compute nodes catalog 3. They include tax and shipping fees, and are subject to change when quoted: they tend to follow the market-wide variations induced by global political and economic events, which are outside of our control. Prices are provided there as a guideline for expectations.

There are two components in the cost of a compute node purchase:

  1. the cost of the hardware itself (capital purchase),

  2. a one-time, per-node infrastructure fee1 that will be charged to cover the costs of connecting the nodes to the cluster infrastructure (racks, PDUs, networking switches, cables...)

No recurring fees

There is currently no recurring fee associated with purchasing compute nodes on Sherlock. In particular, there is no CPU.hour charge: purchased nodes are available to their owners 100% of the time, at no additional cost.

Currently, there are no user, administrative or management fees associated with ongoing system administration of the Sherlock environment. However, PIs should anticipate the eventuality of modest system administration and support fees being levied within the 4-year lifetime of their compute nodes.

"},{"location":"docs/orders/#purchasing-process","title":"Purchasing process","text":"

Minimum purchase

Please note that the minimum purchase is one physical server per PI group. We cannot accommodate multiple PIs pooling funds for a single node.

Single-node orders may incur additional delays

Some node configurations need to be ordered from the vendor in sets of 4 nodes (see the Sherlock catalog for details). So orders for quantities that are not multiples of 4 will need to be grouped with other PIs' orders, which may incur additional delays.

Purchasing nodes on Sherlock is usually a 5-step process:

  1. the PI uses the order form to submit an order,
  2. Stanford Research Computing requests a formal vendor quote to finalize pricing and communicate it back to the PI for approval,
  3. Stanford Research Computing submits a Stanford PO to the vendor,
  4. Stanford Research Computing takes delivery of the hardware and proceeds to its installation,
  5. Stanford Research Computing notifies the PI that their nodes are ready to be used.

The typical delay between a PO submission to the vendor and the availability of the compute nodes to the PIs is usually between 4 and 8 weeks.

Supply chain disruption and component shortages

Global supply chain issues and component shortages have considerably increased lead times, and compute node deliveries are currently in the 6-month range.

"},{"location":"docs/orders/#required-information","title":"Required information","text":"

To place an order, we'll need the following information:

  • The SUNet ID of the PI making the purchase request
  • A PTA2 number to charge the hardware (capital) portion of the purchase
  • A PTA2 number to charge the per-node infrastructure fees (non-capital); it could be the same PTA used for the capital portion of the purchase, or a different one

Hardware costs could be spread over multiple PTAs (with a maximum of 2 PTAs per order). But please note that the infrastructure fees have to be charged to a single PTA.

"},{"location":"docs/orders/#placing-an-order","title":"Placing an order","text":"

To start ordering compute nodes for Sherlock:

check the Sherlock catalog 3 to review prices and select your configurations


fill in the order form 3 to submit your request and provide the required information


And we'll be in touch shortly!

  1. infrastructure fees are considered non-capital for cost accounting purposes and may incur indirect cost burdens on cost-reimbursable contracts and grants.\u00a0\u21a9

  2. PTA is an acronym used for a Project-Task-Award combination representing an account in the Stanford Financial system.\u00a0\u21a9\u21a9

  3. SUNet ID required, document restricted to @stanford.edu accounts.\u00a0\u21a9\u21a9\u21a9\u21a9\u21a9

"},{"location":"docs/tags/","title":"Tags","text":"

Here is a list of documentation tags:

"},{"location":"docs/tags/#tag:advanced","title":"advanced","text":"
  • Node features
"},{"location":"docs/tags/#tag:connection","title":"connection","text":"
  • Connecting
  • Connection options
  • Data transfer
"},{"location":"docs/tags/#tag:slurm","title":"slurm","text":"
  • Job management
  • Node features
  • Running jobs
  • Submitting jobs
"},{"location":"docs/tags/#tag:tech","title":"tech","text":"
  • Facts
  • Technical specifications
"},{"location":"docs/advanced-topics/connection/","title":"Advanced connection options","text":"","tags":["connection"]},{"location":"docs/advanced-topics/connection/#login-nodes","title":"Login nodes","text":"

Sherlock login nodes are regrouped behind a single DNS alias: login.sherlock.stanford.edu.

This alias provides a load-balanced login environment, and the assurance that you will be connected to the least loaded login node when you connect to Sherlock.

If, for any reason, you want to directly connect to a specific login node and bypass the automatic load-balanced dispatching of new connections (which we don't recommend), you can use that login node's hostname explicitly. For instance:

$ ssh <sunetid>@ln21.sherlock.stanford.edu\n

This can be useful if you run long-standing processes on the login nodes, such as screen or tmux sessions. To find them again when you reconnect to Sherlock, you will need to log in to the same login node you started them on.
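
For example, with tmux (the session name is arbitrary):

[kilian@sh-ln01 login! ~]$ tmux new -s mywork\n

and later, after reconnecting to that same login node:

[kilian@sh-ln01 login! ~]$ tmux attach -t mywork\n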

The drawback is that by connecting to a specific login node, you will forfeit the load-balancing benefits, which could result in a crowded environment, or even in login errors if that specific login node is unavailable.

","tags":["connection"]},{"location":"docs/advanced-topics/connection/#authentication-methods","title":"Authentication methods","text":"

Public-key authentication

SSH public-key authentication is not supported on Sherlock.

","tags":["connection"]},{"location":"docs/advanced-topics/connection/#password-recommended","title":"Password (recommended)","text":"

The recommended way to authenticate to Sherlock is to simply use your SUNet ID and password, as described in the Connecting page.

Passwords are not stored on Sherlock: login nodes delegate password authentication to the University's central Kerberos service.

","tags":["connection"]},{"location":"docs/advanced-topics/connection/#gssapi","title":"GSSAPI","text":"

For compatibility with previous generations of Sherlock, GSSAPI1 authentication is still allowed, and could be considered a more convenient option, as this mechanism doesn't require entering your password for each connection.

GSSAPI authentication relies on a token system, where users obtain Kerberos ticket-granting tickets, transmit them via SSH to the server they want to connect to, which will, in turn, verify their validity. That way, passwords are never stored locally, and never transit over the network. That's why Kerberos is usually considered the most secure method to authenticate.

To connect using GSSAPI on Sherlock, you'll need to go through a few steps2:

  1. make sure the Kerberos user tools are installed on your local machine. You'll need the kinit (and optionally klist and kdestroy) utilities. Please refer to your OS documentation to install them if required.

  2. download and install the Stanford krb5.conf file, which contains information about the Stanford Kerberos environment:

    $ sudo curl -o /etc/krb5.conf https://web.stanford.edu/dept/its/support/kerberos/dist/krb5.conf\n
  3. configure your SSH client, by modifying (or creating if it doesn't exist already) the .ssh/config file in your home directory on your local machine. Using a text editor, you can add the following lines to your ~/.ssh/config file (indentation is important):

    Host login.sherlock.stanford.edu\n    GSSAPIDelegateCredentials yes\n    GSSAPIAuthentication yes\n

Once everything is in place (you only need to do this once), you'll be able to test that your Kerberos installation works by running kinit <sunetid>@stanford.edu. You should get a password prompt, and upon success, you'll be able to list your Kerberos credentials with the klist command:

$ kinit kilian@stanford.edu\nPassword for kilian@stanford.edu:\n$ klist\nTicket cache: FILE:/tmp/krb5cc_215845_n4S4I6KgyM\nDefault principal: kilian@stanford.edu\n\nValid starting     Expires            Service principal\n07/28/17 17:33:54  07/29/17 18:33:32  krbtgt/stanford.edu@stanford.edu\n        renew until 08/04/17 17:33:32\n

Kerberos ticket expiration

Kerberos tickets have a 25-hour lifetime. So you'll need to run the kinit command pretty much once a day to continue being able to authenticate to Sherlock.

Please note that when your Kerberos ticket expires, existing Sherlock connections will not be interrupted. So you'll be able to keep connections open to Sherlock for several days without any issue.

You're now ready to connect to Sherlock using GSSAPI. Simply SSH as usual:

$ ssh <sunetid>@login.sherlock.stanford.edu\n

and if everything goes well, you should directly see the two-factor (Duo) prompt, without having to enter your password.

If you want to destroy your Kerberos ticket before its expiration, you can use the kdestroy command.

","tags":["connection"]},{"location":"docs/advanced-topics/connection/#ssh-options","title":"SSH options","text":"

OpenSSH offers a variety of configuration options that you can use in ~/.ssh/config on your local computer. The following sections describe some of the options you can use with Sherlock that may make connecting and transferring files more convenient.

","tags":["connection"]},{"location":"docs/advanced-topics/connection/#avoiding-multiple-duo-prompts","title":"Avoiding multiple Duo prompts","text":"

In order to avoid getting a second-factor (Duo) prompt every time you want to open a new connection to Sherlock, you can take advantage of the multiplexing features provided by OpenSSH.

Simply add the following lines to your ~/.ssh/config file on your local machine to activate the ControlMaster option. If you already have a Host login.sherlock.stanford.edu block in your configuration file, simply add the Control* option lines in the same block.

Host login.sherlock.stanford.edu\n    ControlMaster auto\n    ControlPath ~/.ssh/%l%r@%h:%p\n

It will allow SSH to re-use an existing connection to Sherlock each time you open a new session (create a new SSH connection), thus avoiding subsequent 2FA prompts once the initial connection is established.

The slight disadvantage of this approach is that once you have a connection open to one of Sherlock's login nodes, all your subsequent connections will be using the same login node. This will somewhat defeat the purpose of the load-balancing mechanism used by the login nodes.
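
If you need to check on, or tear down, that shared connection, OpenSSH provides control commands for this purpose:

$ ssh -O check login.sherlock.stanford.edu\n$ ssh -O exit login.sherlock.stanford.edu\n

-O check reports whether a master connection is currently running, and -O exit closes it, so that your next connection will be dispatched to the least-loaded login node again.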

Connection failure with unix_listener error

If your connection fails with the following error message:

unix_listener: \"...\" too long for Unix domain socket\n
you're being hit by a macOS limitation, and you should replace the ControlPath line above by:
ControlPath ~/.ssh/%C\n

","tags":["connection"]},{"location":"docs/advanced-topics/connection/#connecting-from-abroad","title":"Connecting from abroad","text":"

VPN

As a good security practice, we always recommend using the Stanford VPN when connecting from untrusted networks.

Access to Sherlock is not restricted to campus, meaning that you can connect to Sherlock from pretty much anywhere, including when traveling abroad. We don't restrict inbound SSH connections to any specific IP address range or geographical location, so you shouldn't have any issue reaching the login nodes from anywhere.

Regarding two-step authentication, University IT provides alternate authentication options when phone service or Duo Mobile push notifications are not available.

  1. The Generic Security Service Application Program Interface (GSSAPI, also GSS-API) is an application programming interface for programs to access security services. It allows programs to interact with security services such as Kerberos for user authentication.\u00a0\u21a9

  2. Those instructions should work on Linux and macOS computers. For Windows, we recommend using the WSL, as described in the Prerequisites page.\u00a0\u21a9

","tags":["connection"]},{"location":"docs/advanced-topics/job-management/","title":"Job management","text":"","tags":["slurm"]},{"location":"docs/advanced-topics/job-management/#job-submission-limits","title":"Job submission limits","text":"

You may have encountered situations where your jobs get rejected at submission with errors like this:

sbatch: error: MaxSubmitJobsPerAccount\nsbatch: error: MaxSubmitJobsPerUser\n

There are a number of limits on Sherlock that are put in place to guarantee that all users can have fair access to resources and a smooth experience while using them. One of those limits is the total number of jobs a single user (and a single group) can have in queue at any given time. This helps ensure that the scheduler can continue operating in an optimal fashion, without being overloaded by a single user or group.

To see the job submission limits on Sherlock, run the sh_part command.
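
If you prefer standard Slurm tools, the same kind of limits can also be read from the QOS definitions; a sketch (field names follow sacctmgr's QOS format options):

$ sacctmgr show qos format=name,maxsubmitpu,maxjobspu,maxwall\n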

To run longer than 2 days on the normal partition, you will need to add the \"long\" QOS to your submission scripts. For example, to run for exactly 3 days, add the following two lines to your sbatch script:

#SBATCH --time=3-00:00:00\n#SBATCH --qos=long\n

If you have access to an owners partition, you will not need to add this QOS, since the MaxWall on owners is 7 days.

","tags":["slurm"]},{"location":"docs/advanced-topics/job-management/#minimizing-the-number-of-jobs-in-queue","title":"Minimizing the number of jobs in queue","text":"

It's generally a good practice to try reducing the number of jobs submitted to the scheduler, and depending on your workflow, there are various approaches for this. One solution may be to pack more work within a single job, which could help in reducing the overall number of jobs you'll have to submit.

Imagine you have a 100-task array job, where you run 1 app task per array item, which looks like this:

#!/bin/bash\n#SBATCH --array=1-100\n#SBATCH -n 1\n\n./app ${SLURM_ARRAY_TASK_ID}\n

This script would create 100 jobs in queue (even though they would all be regrouped under the same job array), each using 1 CPU to run 1 task.

Instead of that 100-task array job, you can try something like this:

#!/bin/bash\n#SBATCH --array=0-99:10\n#SBATCH -n 10\n\nfor i in {0..9}; do\n    srun -n 1 ./app $((SLURM_ARRAY_TASK_ID+i)) &\ndone\n\nwait # important to make sure the job doesn't exit before the background tasks are done\n
  • --array=0-99:10 will use job array indexes 0, 10, 20 ... 90
  • -n 10 will make sure each job can be subdivided into 10 1-CPU steps
  • the for loop will launch 10 tasks, with indexes from SLURM_ARRAY_TASK_ID to SLURM_ARRAY_TASK_ID + 9.

This would submit a 10-task array job, with each task running 10 steps simultaneously on the 10 CPUs allocated to each job array item.

In the end, you'll have run the same number of app instances, but you'll have divided the number of jobs submitted by 10, allowing you to submit the same amount of work to the scheduler while staying under the submission limits.
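
To verify the packing, you could save the script above (as packed.sh, say) and check the queue after submitting it; the job IDs below are just an illustration:

$ sbatch packed.sh\n$ squeue -u $USER   # should now show 10 array items instead of 100\n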

","tags":["slurm"]},{"location":"docs/advanced-topics/node-features/","title":"Node features","text":"

In heterogeneous environments, computing resources are often grouped together into single pools of resources, to make things easier and more accessible. Most applications can run on any type of hardware, so having all resources regrouped in the same partitions maximizes utilization and makes job submission much easier, as users don't have dozens of options to choose from.

But for more specific use cases, it may be necessary to specifically select the hardware jobs will run on, either for performance or reproducibility purposes.

To that end, all the compute nodes on Sherlock have feature tags assigned to them. Multiple characteristics are available for each node, such as their class, CPU manufacturer, generation, part number and frequency, as well as Infiniband and GPU characteristics.

Requiring specific node features is generally not necessary

Using node features is an advanced topic which is generally not necessary to run simple jobs on Sherlock. If you're just starting, you most likely don't need to worry about those; they're only useful in very specific cases.

","tags":["slurm","advanced"]},{"location":"docs/advanced-topics/node-features/#available-features","title":"Available features","text":"

The table below lists the possible features defined for each node.

  • CLASS:xxx - Node type, as defined in the Sherlock catalog. Examples: CLASS:SH3_CBASE, CLASS:SH3_G4TF64
  • CPU_MNF:xxx - CPU manufacturer. Examples: CPU_MNF:INTEL, CPU_MNF:AMD
  • CPU_GEN:xxx - CPU generation. Examples: CPU_GEN:RME for AMD Rome, CPU_GEN:SKX for Intel Skylake
  • CPU_SKU:xxx - CPU name. Examples: CPU_SKU:5118, CPU_SKU:7502P
  • CPU_FRQ:xxx - CPU core base frequency. Examples: CPU_FRQ:2.50GHz, CPU_FRQ:2.75GHz
  • GPU_BRD:xxx - GPU brand. Examples: GPU_BRD:GEFORCE, GPU_BRD:TESLA
  • GPU_GEN:xxx - GPU generation. Examples: GPU_GEN:VLT for Volta, GPU_GEN:AMP for Ampere
  • GPU_SKU:xxx - GPU name. Examples: GPU_SKU:A100_SXM4, GPU_SKU:RTX_3090
  • GPU_MEM:xxx - GPU memory. Examples: GPU_MEM:32GB, GPU_MEM:80GB
  • GPU_CC:xxx - GPU Compute Capabilities. Examples: GPU_CC:6.1, GPU_CC:8.0
  • IB:xxx - Infiniband generation/speed. Examples: IB:EDR, IB:HDR
  • NO_GPU - special tag set on CPU-only nodes
","tags":["slurm","advanced"]},{"location":"docs/advanced-topics/node-features/#listing-the-features-available-in-a-partition","title":"Listing the features available in a partition","text":"

All the node features available in a partition can be listed with the sh_node_feat command.

For instance, to list all the GPU types in the gpu partition:

$ sh_node_feat -p gpu | grep GPU_SKU\nGPU_SKU:P100_PCIE\nGPU_SKU:P40\nGPU_SKU:RTX_2080Ti\nGPU_SKU:V100_PCIE\nGPU_SKU:V100S_PCIE\nGPU_SKU:V100_SXM2\n

To list all the CPU generations available in the normal partition:

$ sh_node_feat -p normal | grep CPU_GEN\nCPU_GEN:BDW\nCPU_GEN:MLN\nCPU_GEN:RME\nCPU_GEN:SKX\n
","tags":["slurm","advanced"]},{"location":"docs/advanced-topics/node-features/#requesting-specific-node-features","title":"Requesting specific node features","text":"

Those node features can be used in job submission options, as additional constraints for the job, so that the scheduler will only select nodes that match the requested features.

Adding job constraints often increases job pending times

It's important to keep in mind that requesting specific node features usually increases job pending times in queue. The more constraints the scheduler has to satisfy, the smaller the pool of compute nodes jobs can run on, and hence the longer it may take for the scheduler to find eligible resources to run those jobs.

To specify a node feature as a job constraint, the -C/--constraint option can be used.

For instance, to submit a job that should only run on an AMD Rome CPU, you can add the following to your job submission options:

#SBATCH -C CPU_GEN:RME\n

Or to make sure that your training job will run on a GPU with 80GB of GPU memory:

#SBATCH -G 1\n#SBATCH -C GPU_MEM:80GB\n
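
The same constraints work for interactive sessions too. For instance, a sketch of an interactive allocation on an 80GB GPU (assuming you have access to the gpu partition):

$ srun -p gpu -G 1 -C GPU_MEM:80GB --pty bash\n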
","tags":["slurm","advanced"]},{"location":"docs/advanced-topics/node-features/#multiple-constraints","title":"Multiple constraints","text":"

For more complex cases, multiple constraints could be composed in different ways, using logical operators.

Many node feature combinations are impossible to satisfy

Many combinations will result in impossible conditions, making jobs impossible to run on any node. The scheduler is usually able to detect this and reject the job at submission time.

For instance, submitting a job requesting an Intel CPU on the HDR IB fabric:

#SBATCH -C 'CPU_MNF:INTEL&IB:HDR'\n

will result in the following error:

error: Job submit/allocate failed: Requested node configuration is not available\n

as all the compute nodes on the HDR IB fabric use AMD CPUs. Constraints must be used carefully and sparingly to avoid unexpected surprises.

Some of the possible logical operations between constraints are listed below:

","tags":["slurm","advanced"]},{"location":"docs/advanced-topics/node-features/#and","title":"AND","text":"

Only nodes with all the requested features are eligible to run the job. The ampersand sign (&) is used as the AND operator. For example:

#SBATCH -C 'GPU_MEM:32GB&IB:HDR'\n

will request a GPU with 32GB of memory on the HDR Infiniband fabric to run the job.

","tags":["slurm","advanced"]},{"location":"docs/advanced-topics/node-features/#or","title":"OR","text":"

Only nodes with at least one of the specified features will be eligible to run the job. The pipe sign (|) is used as the OR operator.

In multi-node jobs, it means that nodes allocated to the job may end up having different features. For example, the following options:

#SBATCH -N 2\n#SBATCH -C \"CPU_GEN:RME|CPU_GEN:MLN\"\n

may result in a two-node job where one node has an AMD Rome CPU, and the other node has an AMD Milan CPU.

","tags":["slurm","advanced"]},{"location":"docs/advanced-topics/node-features/#matching-or","title":"Matching OR:","text":"

When you need all nodes in a multi-node job to have the same set of features, a matching OR condition can be defined by enclosing the options within square brackets ([,]).

For instance, the following options may be used to request a job to run on nodes with the same CPU frequency, either 2.50GHz or 2.75GHz:

#SBATCH -C \"[CPU_FRQ:2.50GHz|CPU_FRQ:2.75GHz]\"\n
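
Matching OR is mostly useful for multi-node jobs. For example, the following sketch would ensure that all four nodes of a job have the same CPU generation, either all Rome or all Milan:

#SBATCH -N 4\n#SBATCH -C \"[CPU_GEN:RME|CPU_GEN:MLN]\"\n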

Node features are text tags

Node features are text tags, they have no associated numerical value, meaning that they can't be compared.

For instance, it's not possible to add a constraint for GPU Compute Capabilities greater than 8.0. The workaround is to add a job constraint that satisfies all the possible values of that tag, like:

#SBATCH -C \"GPU_CC:8.0|GPU_CC:8.6\"\n

Complete details about the -C/--constraint job submission option and its syntax can be found in the official Slurm documentation.

","tags":["slurm","advanced"]},{"location":"docs/getting-started/","title":"Getting started","text":""},{"location":"docs/getting-started/#prerequisites","title":"Prerequisites","text":"

To start using Sherlock, you will need:

  • an active SUNet ID,

    What is a SUNet ID?

    A SUNet ID is a unique 3-8 character account name that identifies you as a member of the Stanford community, with access to the Stanford University Network of computing resources and services. Not to be confused with your University ID (an 8-digit number that appears on your Stanford ID Card), your SUNet ID is a permanent and visible part of your Stanford identity and often appears in your Stanford email address (e.g. sunetid@stanford.edu).

    SUNet IDs are not managed by Research Computing. For more information, see https://accounts.stanford.edu/

    SUNet ID service levels and external collaborators

    Base-level service is sufficient for Sherlock accounts. External collaborators, or users without a SUNet ID, can be sponsored by a PI to get a sponsored SUNet ID at no cost. Please see the sponsorship page for more information.

  • a Sherlock account,

  • an SSH client,
  • a good understanding of the concepts and terms used throughout this documentation,
  • some familiarity with Unix/Linux command-line environments, and notions of shell scripting.
"},{"location":"docs/getting-started/#how-to-request-an-account","title":"How to request an account","text":"

To request an account, the sponsoring Stanford faculty member should email srcc-support@stanford.edu, specifying the names and SUNet IDs of their research team members needing an account.

Sherlock is open to the Stanford community as a computing resource to support departmental or sponsored research, thus a faculty member's explicit consent is required for account requests.

Sherlock is a resource for research

Sherlock is a resource to help and support research, and is not suitable for course work, class assignments or general-use training sessions.

There is no fee associated with using Sherlock, and no limit on the number of accounts each faculty member can request. We will periodically ensure that all accounts associated with each PI are still active, and reserve the right to close any Sherlock account whose SUNet ID is expired.

"},{"location":"docs/getting-started/#ssh-clients","title":"SSH clients","text":""},{"location":"docs/getting-started/#linux","title":"Linux","text":"

Linux distributions usually come with a version of the OpenSSH client already installed, so no additional software installation is required. If not, please refer to your distribution's documentation to install it.
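
You can verify that a client is available by asking for its version (the exact output will vary with your distribution):

$ ssh -V\n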

"},{"location":"docs/getting-started/#macos","title":"macOS","text":"

macOS systems usually come with a version of the OpenSSH client already installed, so no additional software installation is required.

"},{"location":"docs/getting-started/#windows","title":"Windows","text":"

Microsoft Windows includes an SSH client by default, which can be used to connect to Sherlock from a Windows terminal.

Windows also has a feature called the \"Windows Subsystem for Linux\" (WSL), which provides a Linux-like experience and makes switching across systems more seamless. Please refer to the official documentation or this HOWTO for installation instructions.

The two options above will ensure the best compatibility with the Sherlock environment. If you'd like to explore other avenues, many other SSH client implementations are available, but have not necessarily been tested with Sherlock, so your mileage may vary.

"},{"location":"docs/getting-started/#unixlinux-resources","title":"Unix/Linux resources","text":"

A full tutorial on using Unix/Linux is beyond the scope of this documentation. However, there are many tutorials for beginning to use Unix/Linux on the web.

A few tutorials we recommend are:

  • Introduction to Unix (Imperial College, London)
  • The Unix Shell (Software Carpentry)

More specifically about HPC and Research Computing:

  • HPC in a day (Software Carpentry)
  • Intro to HPC (HPC Carpentry)
  • Research Computing Q&A (Ask.Cyberinfrastructure)
"},{"location":"docs/getting-started/#text-editors","title":"Text editors","text":"

Multiple text editors are available on Sherlock. For beginners, we recommend the use of nano. For more advanced use, you'll also find below some resources about using vim.

  • nano guide (Gentoo wiki)
  • vim guide (Gentoo wiki)

Note: you can also create/edit files with the Sherlock OnDemand File editor

"},{"location":"docs/getting-started/#shell-scripting","title":"Shell scripting","text":"

Compute jobs launched on Sherlock are most often initialized by user-written shell scripts. Beyond that, many common operations can be simplified and automated using shell scripts.

For an introduction to shell scripting, you can refer to:

  • Bash Programming - Introduction HOWTO
"},{"location":"docs/getting-started/connecting/","title":"Connecting to Sherlock","text":"

Sherlock account required

To be able to connect to Sherlock, you must first obtain a Sherlock account.

","tags":["connection"]},{"location":"docs/getting-started/connecting/#credentials","title":"Credentials","text":"

All users must have a Stanford SUNet ID and a Sherlock account to log in to Sherlock. Your Sherlock account uses the same username/password as your SUNet ID:

Username: SUNet ID\nPassword: SUNet ID password\n

To request a Sherlock account, please see the Prerequisites page.

Resetting passwords

Sherlock does not store your SUNet ID password. As a consequence, we are unable to reset your password. If you require password assistance, please see the SUNet Account page.

","tags":["connection"]},{"location":"docs/getting-started/connecting/#connection","title":"Connection","text":"

Access to Sherlock is provided via Secure Shell (SSH) login. Most Unix-like operating systems provide an SSH client by default that can be accessed by typing the ssh command in a terminal window.

To log in to Sherlock, open a terminal and type the following command, where <sunetid> should be replaced by your actual SUNet ID:

$ ssh <sunetid>@login.sherlock.stanford.edu\n

Upon logging in, you will be connected to one of Sherlock's load-balanced login nodes. You should be automatically directed to the least-loaded login node at the moment of your connection, which should give you the best possible environment to work in.

","tags":["connection"]},{"location":"docs/getting-started/connecting/#host-keys","title":"Host keys","text":"

Upon your very first connection to Sherlock, you will be greeted by a warning such as:

The authenticity of host 'login.sherlock.stanford.edu' can't be established.\nECDSA key fingerprint is SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg.\nAre you sure you want to continue connecting (yes/no)?\n

The same warning will be displayed if you try to connect to one of the Data Transfer Nodes (DTNs):

The authenticity of host 'dtn.sherlock.stanford.edu' can't be established.\nECDSA key fingerprint is SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg.\nAre you sure you want to continue connecting (yes/no)?\n

This warning is normal: your SSH client warns you that it is the first time it sees that new computer. To make sure you are actually connecting to the right machine, you should compare the ECDSA key fingerprint shown in the message with one of the fingerprints below:

  • RSA: SHA256:T1q1Tbq8k5XBD5PIxvlCfTxNMi1ORWwKNRPeZPXUfJA (legacy format: f5:8f:01:46:d1:f9:66:5d:33:58:b4:82:d8:4a:34:41)
  • ECDSA: SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg (legacy format: 70:4c:76:ea:ae:b2:0f:81:4b:9c:c6:5a:52:4c:7f:64)

If they match, you can proceed and type \u2018yes\u2019. Your SSH program will then store that key and will verify it for every subsequent SSH connection, to make sure that the server you're connecting to is indeed Sherlock.

","tags":["connection"]},{"location":"docs/getting-started/connecting/#host-keys-warning","title":"Host keys warning","text":"

If you've connected to Sherlock 1.0 before, there's a good chance the Sherlock 1.0 keys were stored by your local SSH client. In that case, when connecting to Sherlock 2.0 using the sherlock.stanford.edu alias, you will be presented with the following message:

@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@ WARNING: POSSIBLE DNS SPOOFING DETECTED! @\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nThe RSA host key for sherlock.stanford.edu has changed, and the key for\nthe corresponding IP address 171.66.97.101 is unknown. This could\neither mean that DNS SPOOFING is happening or the IP address for the\nhost and its host key have changed at the same time.\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nIT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\nSomeone could be eavesdropping on you right now (man-in-the-middle\nattack)!  It is also possible that a host key has just been changed.\nThe fingerprint for the RSA key sent by the remote host is\nSHA256:T1q1Tbq8k5XBD5PIxvlCfTxNMi1ORWwKNRPeZPXUfJA.\nPlease contact your system administrator.\n

You can just check that the SHA256 key listed in that warning message correctly matches the one listed in the table above, and if that's the case, you can safely remove the sherlock.stanford.edu entry from your ~/.ssh/known_hosts file with the following command on your local machine:

$ ssh-keygen -R sherlock.stanford.edu\n

and then connect again. You'll see the first-connection prompt mentioned above, and your SSH client will store the new keys for future connections.
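
If the warning persists after that, the stored entry for the corresponding IP address may also need to be removed (a sketch, using the IP address shown in the warning above):

$ ssh-keygen -R 171.66.97.101\n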

","tags":["connection"]},{"location":"docs/getting-started/connecting/#authentication","title":"Authentication","text":"","tags":["connection"]},{"location":"docs/getting-started/connecting/#password","title":"Password","text":"

To ease access and increase compatibility1 with different platforms, Sherlock allows a simple password-based authentication mechanism for SSH.2

Upon connection, you will be asked for your SUNet ID password with the following prompt:

<sunetid>@login.sherlock.stanford.edu's password:\n

Enter your password, and if it's correct, you should see the following line:

Authenticated with partial success.\n
","tags":["connection"]},{"location":"docs/getting-started/connecting/#second-factor-2fa","title":"Second factor (2FA)","text":"

Sherlock implements Stanford's Minimum Security Standards policies which mandate two-step authentication to access the cluster.

Two-step authentication protects your personal information and credentials by combining something only you know (your password) with something only you have (your phone, tablet or token). This prevents an attacker who would steal your password from actually using it to impersonate you. For more details about two-step authentication at Stanford, please refer to the University IT two-step page.

After successfully entering your password, you'll be prompted for your second authentication factor with a message like this:

Duo two-factor login for <sunetid>\n\nEnter a passcode or select one of the following options:\n\n 1. Duo Push to XXX-XXX-9999\n 2. Phone call to XXX-XXX-9999\n 3. SMS passcodes to XXX-XXX-9999 (next code starts with: 9)\n\nPasscode or option (1-3):\n

Avoiding two-factor prompt on each connection

If you routinely open multiple sessions to Sherlock, having to confirm each one of them with a second authentication factor could rapidly become cumbersome. To work around this, the OpenSSH client allows multiplexing channels and re-using existing authenticated connections to open new sessions. Please see the Advanced Connection Options page for more details.

If your second factor is accepted, you'll see the following message:

Success. Logging you in...\n
","tags":["connection"]},{"location":"docs/getting-started/connecting/#troubleshooting","title":"Troubleshooting","text":"","tags":["connection"]},{"location":"docs/getting-started/connecting/#timeouts","title":"Timeouts","text":"

If you ever encounter timeout errors when connecting to Sherlock, like these:

$ ssh login.sherlock.stanford.edu\nssh: connect to host login.sherlock.stanford.edu port 22: Operation timed out\n

you can try to either:

  • switch to a wired connection if you're connecting over wifi,
  • connect via the Stanford VPN
","tags":["connection"]},{"location":"docs/getting-started/connecting/#authentication-failures","title":"Authentication failures","text":"

Excessive authentication failures

Entering an invalid password multiple times will result in a (temporary) ban of your IP address.

To prevent brute-force password guessing attacks on Sherlock login nodes, we automatically block IP addresses that generate too many authentication failures in a given time span. This results in a temporary ban of the infringing IP address, making it impossible for the user to connect to Sherlock from that IP address.

When this happens, your SSH connection attempts will result in the following error:

ssh: connect to host login.sherlock.stanford.edu port 22: Connection refused\n

IP addresses blocked by this mechanism will automatically be authorized again after a few minutes.

SSHFS on macOS

SSHFS on macOS is known to try to automatically reconnect filesystem mounts after resuming from sleep or suspend, even without any valid credentials. As a result, it will generate a lot of failed connection attempts and likely get your IP address blacklisted on login nodes.

Make sure to unmount your SSHFS drives before putting your macOS system to sleep to avoid this situation.

VPN

If your IP got blocked and you have an urgent need to connect, before the automatic blacklist expiration, we recommend trying to connect through Stanford's VPN: your computer will then use a different IP address and will not be affected by the ban on your regular IP address.

","tags":["connection"]},{"location":"docs/getting-started/connecting/#login","title":"Login","text":"

Congratulations! You've successfully connected to Sherlock. You'll be greeted by the following message of the day:

             --*-*- Stanford Research Computing Center -*-*--\n                  ____  _               _            _\n                 / ___|| |__   ___ _ __| | ___   ___| | __\n                 \\___ \\| '_ \\ / _ \\ '__| |/ _ \\ / __| |/ /\n                  ___) | | | |  __/ |  | | (_) | (__|   <\n                 |____/|_| |_|\\___|_|  |_|\\___/ \\___|_|\\_\\\n\n-----------------------------------------------------------------------------\n  This system is for authorized users only and users must comply with all\n  Stanford computing, network and research policies. All activity may be\n  recorded for security and monitoring purposes. For more information, see\n  https://doresearch.stanford.edu/policies/research-policy-handbook and\n  https://adminguide.stanford.edu/chapter-6/subchapter-2/policy-6-2-1\n-----------------------------------------------------------------------------\n  Sherlock is *NOT* approved for storing or processing HIPAA, PHI, PII nor\n  any kind of High Risk data. Users are responsible for the compliance of\n  their data.\n  See https://uit.stanford.edu/guide/riskclassifications for details.\n-----------------------------------------------------------------------------\n\n        Docs         https://www.sherlock.stanford.edu/docs\n        Support      https://www.sherlock.stanford.edu/docs/#support\n\n        Web          https://www.sherlock.stanford.edu\n        News         https://news.sherlock.stanford.edu\n        Status       https://status.sherlock.stanford.edu\n\n-----------------------------------------------------------------------------\n

Once authenticated to Sherlock, you'll see the following prompt:

[<sunetid>@sh03-ln01 login! ~]$

It indicates the name of the login node you've been connected to, and a reminder that you're actually connected to a login node, not a compute node.

Login nodes are not for computing

Login nodes are shared among many users and therefore must not be used to run computationally intensive tasks. Those should be submitted to the scheduler which will dispatch them on compute nodes.

By contrast, the shell prompt on a compute node looks like this:

[<sunetid>@sh03-01n01 ~]$

","tags":["connection"]},{"location":"docs/getting-started/connecting/#start-computing","title":"Start computing","text":"

To start computing, there's still an extra step required: requesting resources to run your application. It's all described in the next section.

  1. On Sherlock 1.0, GSSAPI tokens (based on Kerberos tickets) were the only allowed authentication method, which could cause some interoperability issues with third-party SSH clients.\u00a0\u21a9

  2. For other methods of authentication, see the Advanced Connection Options page.\u00a0\u21a9

","tags":["connection"]},{"location":"docs/getting-started/submitting/","title":"Submitting jobs","text":"","tags":["slurm"]},{"location":"docs/getting-started/submitting/#principle","title":"Principle","text":"

Login nodes are not for computing

Login nodes are shared among many users and therefore must not be used to run computationally intensive tasks. Those should be submitted to the scheduler which will dispatch them on compute nodes.

","tags":["slurm"]},{"location":"docs/getting-started/submitting/#requesting-resources","title":"Requesting resources","text":"

A mandatory prerequisite for running computational tasks on Sherlock is to request computing resources. This is done via a resource scheduler, whose very purpose is to match compute resources in the cluster (CPUs, GPUs, memory, ...) with user resource requests.

The scheduler provides three key functions:

  1. it allocates access to resources (compute nodes) to users for some duration of time so they can perform work.
  2. it provides a framework for starting, executing, and monitoring work (typically a parallel job such as MPI) on a set of allocated nodes.
  3. it arbitrates contention for resources by managing a queue of pending jobs.
","tags":["slurm"]},{"location":"docs/getting-started/submitting/#slurm","title":"Slurm","text":"

Sherlock uses Slurm, an open-source resource manager and job scheduler, used by many of the world's supercomputers and computer clusters.

Slurm supports a variety of job submission techniques. By accurately requesting the resources you need, you\u2019ll be able to get your work done.

Wait times in queue

As a quick rule of thumb, it's important to keep in mind that the more resources your job requests (CPUs, GPUs, memory, nodes, and time), the longer it may have to wait in queue before it could start.

In other words: accurately requesting resources to match your job's needs will minimize your wait times.

","tags":["slurm"]},{"location":"docs/getting-started/submitting/#how-to-submit-a-job","title":"How to submit a job","text":"A job consists in two parts: resource requests and job steps.

Resource requests describe the amount of computing resources (CPUs, GPUs, memory, expected run time, etc.) that the job will need to successfully run.

Job steps describe tasks that must be executed.

","tags":["slurm"]},{"location":"docs/getting-started/submitting/#batch-scripts","title":"Batch scripts","text":"

The typical way of creating a job is to write a job submission script. A submission script is a shell script (e.g. a Bash script) whose first comments, if they are prefixed with #SBATCH, are interpreted by Slurm as parameters describing resource requests and submissions options1.

The submission script itself is a job step. Other job steps are created with the srun command.

For instance, the following script would request one task with one CPU for 10 minutes, along with 2 GB of memory, in the default partition:

submit.sh
#!/bin/bash\n#\n#SBATCH --job-name=test\n#\n#SBATCH --time=10:00\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=1\n#SBATCH --mem-per-cpu=2G\n\nsrun hostname\nsrun sleep 60\n

When started, the job would run a first job step srun hostname, which will launch the command hostname on the node on which the requested CPU was allocated. Then, a second job step will start the sleep command.

You can create this job submission script on Sherlock using a text editor such as nano or vim, and save it as submit.sh.

#SBATCH directives syntax

#SBATCH directives must be at the top of the script

Slurm will ignore all #SBATCH directives after the first non-comment line (that is, the first line in the script that doesn't start with a # character). Always put your #SBATCH parameters at the top of your batch script.

Spaces in parameters will cause #SBATCH directives to be ignored

Slurm will ignore all #SBATCH directives after the first white space. For instance, directives like these:

#SBATCH --job-name=big job\n
#SBATCH --mem=16 G\n
#SBATCH --partition=normal, owners\n
will cause all following #SBATCH directives to be ignored and the job to be submitted with the default parameters.
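
For reference, here are the same directives without spaces, which Slurm would accept (shown for illustration):

#SBATCH --job-name=big_job\n
#SBATCH --mem=16G\n
#SBATCH --partition=normal,owners\n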

","tags":["slurm"]},{"location":"docs/getting-started/submitting/#job-submission","title":"Job submission","text":"

Once the submission script is written properly, you can submit it to the scheduler with the sbatch command. Upon success, sbatch will return the ID it has assigned to the job (the jobid).

$ sbatch submit.sh\nSubmitted batch job 1377\n
","tags":["slurm"]},{"location":"docs/getting-started/submitting/#check-the-job","title":"Check the job","text":"

Once submitted, the job enters the queue in the PENDING state. When resources become available and the job has sufficient priority, an allocation is created for it and it moves to the RUNNING state. If the job completes correctly, it goes to the COMPLETED state, otherwise, its state is set to FAILED.

You'll be able to check the status of your job and follow its evolution with the squeue -u $USER command:

$ squeue -u $USER\n     JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n      1377    normal     test   kilian  R       0:12      1 sh02-01n01\n

The scheduler will automatically create an output file that will contain the result of the commands run in the script file. That output file is named slurm-<jobid>.out by default, but it can be customized via submission options. In the above example, you can list the contents of that output file with the following command:

$ cat slurm-1377.out\nsh02-01n01\n
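
As noted above, the output file name can be customized with the --output submission option; Slurm replaces %j with the job ID. For instance (a sketch):

#SBATCH --output=test-%j.out\n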

Congratulations, you've submitted your first batch job on Sherlock!

","tags":["slurm"]},{"location":"docs/getting-started/submitting/#whats-next","title":"What's next?","text":"

Actually, quite a lot. Although you now know how to submit a simple batch job, there are many other options and areas to explore in the next sections:

  • Data transfer
  • Storage
  • Running jobs
  1. You can get the complete list of parameters by referring to the sbatch manual page (man sbatch).\u00a0\u21a9

","tags":["slurm"]},{"location":"docs/software/","title":"Software on Sherlock","text":""},{"location":"docs/software/#available-software","title":"Available software","text":"

A set of supported software installations is provided for use on Sherlock. This software is made available through a Software Modules system. For the complete list of available software, please refer to the Software List page.

Licensed software can be used on Sherlock, under certain conditions. Feel free to contact us for more details or if you have questions. For more information about purchasing software licenses, you can contact the Stanford Software Licensing office.

"},{"location":"docs/software/#installation-requests","title":"Installation requests","text":"

Installation requests

The Stanford Research Computing team installs, for general use, a set of libraries, tools and software applications that are commonly used across many research groups. However, our staff resources are quite limited and don't allow us to build or maintain custom software applications that may be requested by or be of use to a small number of users.

We strongly encourage users to build custom and field- or domain-specific software themselves, and install it in their own personal or group shared directories. That way, they can share the software installations with the rest of the users in their group, if necessary.

Users may even maintain and publish their own local module files to dynamically configure a running environment to use the software. They could share those modules with other users to simplify the use of their own custom software installations.
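
As an illustration, with an Lmod-style module system, publishing such a shared module directory could be as simple as adding it to the module search path (a sketch; the $GROUP_HOME/modules path is only an example):

$ module use $GROUP_HOME/modules\n$ module avail\n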

Installing your own software

For more information about building your own software on Sherlock, please see the Software Installation page

If the software you need is not in the list of available software, and you have trouble installing it on your own, please contact us with as much detail about the package as possible, and we will try to help you install it.

If it's a widely used software package that could benefit multiple users across different scientific communities, we will consider installing it globally as resources permit1.

"},{"location":"docs/software/#contributed-software","title":"Contributed software","text":"

PI groups and labs can share their software installations and modules with the whole Sherlock user community, and let everyone benefit from their tuning efforts and software developments.

Contributed software is supported and maintained by each lab, and contact information is usually provided in the contribs module. See the Modules page for more information about using software modules on Sherlock.
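
Assuming the contribs module behaves like the other category modules, exploring contributed software could look like this (a sketch):

$ module load contribs\n$ module avail\n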

If you're interested in sharing your software installations beyond your own group on Sherlock, please let us know, and we'll get in touch.

  1. Software requests, including version upgrades, are fulfilled in the order they are received, and as time permits. We don't have any dedicated team for software installations, and requests are handled along with other duties, typically within two to three weeks of being received.\u00a0\u21a9

"},{"location":"docs/software/install/","title":"Installation","text":"

Software installation requests

For more information about software installation requests, please see the Software Overview page

If the software package or version you need is not available in the list of provided software, you may compile and install it yourself. The recommended location for user-installed software is the $GROUP_HOME group shared directory, which is snapshotted and replicated off-site, and can easily be shared with members of a research group.
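
For a typical autotools-based package, installing into that location could look like the following (a sketch; myapp is a placeholder name):

$ ./configure --prefix=$GROUP_HOME/software/myapp\n$ make\n$ make install\n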

Work in progress

This page is a work in progress and is not complete yet. We are actively working on adding more content and information.

"},{"location":"docs/software/list/","title":"List","text":""},{"location":"docs/software/list/#software-list","title":"Software list","text":"

The full list of software centrally installed and managed on Sherlock is in the tables below.

Permanent work in progress

Software installations on Sherlock are an ever ongoing process. We're continuously adding new software to the list. If you're looking for something that is not in the list, there may be other options.

Subscribe to updates

Never want to miss a software update again? Stay up-to-date with new software updates by following the Sherlock software update RSS feed.

"},{"location":"docs/software/list/#categories","title":"Categories","text":"

Software modules on Sherlock are organized in categories, by scientific field or functional class. It means that you will have to first load a category module before getting access to individual modules. The math and devel categories are loaded by default. See the Modules page for further details and examples.
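
For instance, loading a module from the biology category would first require loading the category module (a sketch; bowtie2 is just an example from the list below):

$ module load biology\n$ module load bowtie2\n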

We currently provide 587 software modules, in 7 categories, covering 94 fields of science:

  • biology clinical science, computational biology, cryo-em, genomics, molecular biology, neurology, pathology, phylogenetics, population genetics, radiology, workflow management

  • chemistry cheminformatics, computational chemistry, crystallography, docking, electrostatics, molecular dynamics, quantum chemistry, tools

  • devel build, compiler, data, data analytics, debug, engine, framework, IDE, language, lib, mpi, networking, parser, profiling, runtime

  • math computational geometry, deep learning, graph computing, lib, linear algebra, machine learning, numerical analysis, numerical library, optimization, scientific computing, statistics, symbolic, technical computing, topic modelling

  • physics astronomy, CFD, climate modeling, geophysics, geoscience, lib, magnetism, materials science, micromagnetics, particle, photonics, quantum information science, quantum mechanics

  • system backup, benchmark, checkpointing, cloud interface, compiler, compression, containers, database, document management, document processing, file management, file transfer, framework, hardware, job management, language, libs, media, performance, resource monitoring, scm, shell, testing, tool, tools

  • viz data, gis, graphs, imaging, molecular visualization, plotting, remote display

Licensed software

Access to software modules marked with in the tables below is restricted to properly licensed user groups.

Stanford Research Computing is not funded to provide commercial software on Sherlock and researchers are responsible for the costs of purchasing and renewing commercial software licenses. For more information, please feel free to contact us and see the Stanford Software Licensing page for purchasing information.

Additional flags and features

Some of the modules listed below have been built to support specific architectures or parallel execution modes:

  • versions marked with support GPU acceleration
  • versions marked with support MPI parallel execution
  • versions marked with are the default version for the module
"},{"location":"docs/software/list/#biology","title":"biology","text":"Field Module\u00a0name Version(s) URL Description clinical science simvascular 20180704 Website Simvascular is a blood flow simulation and analysis toolkit. This module provides the svFSI (Fluid Solid Interaction) solver. computational biology py-biopython 1.70_py271.79_py361.79_py39 Website Biopython is a set of freely available tools for biological computation written in Python. computational biology rosetta 3.8 3.14 Website Rosetta is the premier software suite for modeling macromolecular structures. As a flexible, multi-purpose application, it includes tools for structure prediction, design, and remodeling of proteins and nucleic acids. cryo-em ctffind 4.1.13 Website ctffind is a program for finding CTFs of electron micrographs. cryo-em eman2 2.2 2.91 Website EMAN2 is a broadly based greyscale scientific image processing suite with a primary focus on processing data from transmission electron microscopes. cryo-em imod 4.9.12 4.11.5 Website IMOD is a set of image processing, modeling and display programs used for tomographic reconstruction and for 3D reconstruction of EM serial sections and optical sections. cryo-em motioncor2 1.3.1 1.5.0 1.6.4 Website MotionCor2 is a multi-GPU accelerated program which corrects anisotropic image motion at the single pixel level. cryo-em py-topaz 0.2.4_py36 0.2.5_py39 Website A pipeline for particle detection in cryo-electron microscopy images using convolutional neural networks trained from positive and unlabeled examples. cryo-em relion 2.0.3 2.1 4.0.1 Website RELION (for REgularised LIkelihood OptimisatioN, pronounce rely-on) is a stand-alone computer program that employs an empirical Bayesian approach to refinement of (multiple) 3D reconstructions or 2D class averages in electron cryo-microscopy (cryo-EM). genomics angsd 0.9190.931 Website ANGSD is a software for analyzing next generation sequencing data. genomics augustus 3.3.2 Website AUGUSTUS is a program that predicts genes in eukaryotic genomic sequences. genomics bamtools 2.5.1 Website BamTools is a project that provides both a C++ API and a command-line toolkit for reading, writing, and manipulating BAM (genome alignment) files. genomics bcftools 1.61.81.16 Website BCFtools is a program for variant calling and manipulating files in the Variant Call Format (VCF) and its binary counterpart BCF. genomics bcl-convert 4.2.7 Website The BCL Convert App generates demultiplexed FASTQ files from a run as input. genomics bcl2fastq 2.20 Website The bcl2fastq2 conversion software can be used to convert BCL files from MiniSeq, MiSeq, NextSeq, HiSeq, iSeq and NovaSeq sequening systems. genomics bedops 2.4.40 Website BEDOPS is an open-source command-line toolkit that performs highly efficient and scalable Boolean and other set operations, statistical calculations, archiving, conversion and other management of genomic data of arbitrary scale. genomics bedtools 2.27.12.30.0 Website The bedtools utilities are a swiss-army knife of tools for a wide-range of genomics analysis tasks. genomics bgen 1.1.4 Website bgen is the reference implementation of the BGEN format, a binary file format for imputed genotype and haplotype data. genomics bowtie 1.2.2 Website Bowtie is an ultrafast, memory-efficient short read aligner. genomics bowtie2 2.3.4.1 Website Bowtie 2 is an ultrafast and memory-efficient tool for aligning sequencing reads to long reference sequences. 
genomics breseq 0.38.1 Website breseq is a computational pipeline for finding mutations relative to a reference sequence in short-read DNA resequencing data. genomics bwa 0.7.17 Website BWA (Burrows-Wheeler Aligner) is a software package for mapping low-divergent sequences against a large reference genome, such as the human genome. genomics canu 1.8 Website A single molecule sequence assembler for genomes large and small. genomics cellranger 7.1.0 Website Cell Ranger is a set of analysis pipelines that process Chromium single-cell RNA-seq output to align reads, generate gene-cell matrices and perform clustering and gene expression analysis. genomics cellranger-atac 2.1.0 Website Cell Ranger ATAC is a set of analysis pipelines that process Chromium Single Cell ATAC data. genomics cufflinks 2.2.1 Website Cufflinks assembles transcripts, estimates their abundances, and tests for differential expression and regulation in RNA-Seq samples. genomics dorado 0.3.40.5.3 Website Dorado is a high-performance, easy-to-use, open source basecaller for Oxford Nanopore reads. genomics fastqc 0.11.8 Website FastQC aims to provide a simple way to do some quality control checks on raw sequence data coming from high throughput sequencing pipelines. genomics fastx_toolkit 0.0.14 Website The FASTX-Toolkit is a collection of command line tools for Short-Reads FASTA/FASTQ files preprocessing. genomics freebayes 1.2.0 Website FreeBayes is a Bayesian genetic variant detector designed to find small polymorphisms. genomics gatk 4.1.0.04.1.4.1 Website GATK (Genome Analysis Toolkit) offers a wide variety of tools with a primary focus on variant discovery and genotyping. genomics gemma 0.98.5 Website GEMMA is a software toolkit for fast application of linear mixed models (LMMs) and related models to genome-wide association studies (GWAS) and other large-scale data sets. genomics hic-pro 2.10.0 Website HiC-Pro: An optimized and flexible pipeline for Hi-C data processing. genomics hisat2 2.1.0 Website HISAT2 is a fast and sensitive alignment program for mapping next-generation sequencing reads (both DNA and RNA) to a population of human genomes (as well as to a single reference genome). genomics htslib 1.61.81.10.21.141.16 Website C library for high-throughput sequencing data formats. genomics jellyfish 2.2.10 Website A fast multi-threaded k-mer counter. genomics kallisto 0.44.0 0.46.10.50.1 Website kallisto is a program for quantifying abundances of transcripts from RNA-Seq data using high-throughput sequencing reads. genomics metal 20110325 Website The METAL software is designed to facilitate meta-analysis of large datasets (such as several whole genome scans) in a convenient, rapid and memory efficient manner. genomics mixcr 2.1.124.6.0 Website MiXCR is a universal framework that processes big immunome data from raw sequences to quantitated clonotypes. genomics ncbi-blast+ 2.6.02.7.12.11.0 Website NCBI BLAST+ is a suite of command-line tools to run BLAST (Basic Local Alignment Search Tool), an algorithm for comparing primary biological sequence information. genomics ncbi-vdb 3.0.7 Website NCBI VDB is the database engine used by NCBI SRA tools. genomics plink 1.071.90b5.32.0a12.0a2 Website PLINK is a free, open-source whole genome association analysis toolset, designed to perform a range of basic, large-scale analyses in a computationally efficient manner. genomics popscle 0.1 Website popscle is a suite of population scale analysis tools for single-cell genomics data. 
genomics py-busco 3.0.2_py27 Website Assessing genome assembly and annotation completeness with Benchmarking Universal Single-Copy Orthologs (BUSCO). genomics py-bx-python 0.8.1_py270.8.13_py39 Website Tools for manipulating biological data, particularly multiple sequence alignments. genomics py-cutadapt 1.18_py27 1.18_py36 Website Cutadapt finds and removes adapter sequences, primers, poly-A tails and other types of unwanted sequence from your high-throughput sequencing reads. genomics py-deeplabcut 2.2.3_py39 Website A software package for animal pose estimation. genomics py-deeptools 3.3.1_py36 Website Tools to process and analyze deep sequencing data. genomics py-fithic 1.1.3_py27 Website Fit-Hi-C is a tool for assigning statistical confidence estimates to chromosomal contact maps produced by genome architecture assays. genomics py-htseq 2.0.1_py39 Website HTSeq is a Python library to facilitate processing and analysis of data from high-throughput sequencing (HTS) experiments. genomics py-macs2 2.1.1_py272.2.9.1_py39 Website MACS (Model-based Analysis of ChIP-Seq) implements a novel ChIP-Seq analysis method. genomics py-mageck 0.5.9.4_py36 Website Model-based Analysis of Genome-wide CRISPR-Cas9 Knockout (MAGeCK) is a computational tool to identify important genes from the recent genome-scale CRISPR-Cas9 knockout screens technology. genomics py-mapdamage 2.2.1_py36 Website mapDamage2 is a computational framework which tracks and quantifies DNA damage patterns among ancient DNA sequencing reads generated by Next-Generation Sequencing platforms. genomics py-multiqc 1.6_py27 1.6_py36 Website MultiQC is a reporting tool that parses summary statistics from results and log files generated by other bioinformatics tools. genomics py-obitools 1.2.13_py27 Website OBITools is a set of programs designed for analyzing NGS data in a DNA metabarcoding context. genomics py-orthofinder 2.5.4_py39 Website OrthoFinder is a fast, accurate and comprehensive platform for comparative genomics. genomics py-pybedtools 0.8.0_py270.8.2_py360.9.0_py39 Website Pybedtools wraps and extends BEDTools and offers feature-level manipulations from within Python. genomics py-pysam 0.14.1_py270.15.3_py360.18.0_py39 Website Pysam is a python module for reading, manipulating and writing genomic data sets. genomics py-scanpy 1.8.2_py39 Website Scanpy is a scalable toolkit for analyzing single-cell gene expression data. genomics py-vcf2gwas 0.8.9_py39 Website Python API for comprehensive GWAS analysis using GEMMA. genomics py-vispr 0.4.17_py36 Website A visualization framework for CRISPR/Cas9 knockout screens, analyzed with MAGeCK. genomics regenie 2.2.4 Website regenie is a C++ program for whole genome regression modelling of large genome-wide association studies. genomics rsem 1.3.3 Website RSEM is a software package for estimating gene and isoform expression levels from RNA-Seq data. genomics salmon 0.12.01.10.0 Website Highly-accurate & wicked fast transcript-level quantification from RNA-seq reads using lightweight alignments. genomics samtools 1.61.81.16.1 Website Tools (written in C using htslib) for manipulating next-generation sequencing data. genomics sentieon 201808.01 202112.01 Website Sentieon Genomics software is a set of software tools that perform analysis of genomic data obtained from DNA sequencing. genomics shapeit 4.0.0 4.2.2 Website SHAPEIT4 is a fast and accurate method for estimation of haplotypes (aka phasing) for SNP array and high coverage sequencing data. 
genomics sra-tools 2.11.03.0.7 Website The SRA Toolkit and SDK from NCBI is a collection of tools and libraries for using data in the INSDC Sequence Read Archives. genomics star 2.5.4b2.7.10b Website STAR: ultrafast universal RNA-seq aligner. genomics stringtie 2.2.1 Website StringTie is a fast and highly efficient assembler of RNA-Seq alignments into potential transcripts. genomics tophat 2.1.1 Website TopHat is a fast splice junction mapper for RNA-Seq reads. genomics trim_galore 0.5.0 Website Trim Galore! is a wrapper script to automate quality and adapter trimming as well as quality control, with some added functionality to remove biased methylation positions for RRBS sequence files. genomics trinity 2.8.42.13.1 Website Trinity RNA-Seq de novo transcriptome assembly. genomics vcflib 1.0.0 Website A C++ library for parsing and manipulating VCF files. genomics vcftools 0.1.15 Website VCFtools is a program package designed for working with VCF files, such as those generated by the 1000 Genomes Project. genomics viennarna 2.5.1 Website A C code library and several stand-alone programs for the prediction and comparison of RNA secondary structures. molecular biology dssp 4.0.3 Website DSSP is an application to assign secondary structure to proteins. molecular biology libcifpp 3.0.0 Website Library to work with mmCIF and PDB files. neurology afni 17.2.0718.2.0421.3.00 Website AFNI (Analysis of Functional NeuroImages) is a set of C programs for processing, analyzing, and displaying functional MRI (FMRI) data - a technique for mapping human brain activity. neurology ants 2.1.02.3.12.4.0 Website ANTs computes high-dimensional mappings to capture the statistics of brain structure and function. neurology bart 0.7.00 Website BART is a toolbox for Computational Magnetic Resonance Imaging. neurology dcm2niix 1.0.201712151.0.20211006 Website dcm2niix is a program esigned to convert neuroimaging data from the DICOM format to the NIfTI format. neurology freesurfer 6.0.17.1.17.2.07.3.27.4.1 Website An open source software suite for processing and analyzing (human) brain MRI images. neurology fsl 5.0.10 6.0.7.10 Website FSL is a comprehensive library of analysis tools for FMRI, MRI and DTI brain imaging data. neurology mricron 20160502 Website MRIcron is a cross-platform NIfTI format image viewer. neurology mrtrix 0.3.163.0.3 Website MRtrix3 provides a set of tools to perform various types of diffusion MRI analyses, from various forms of tractography through to next-generation group-level analyses. neurology py-mdt 0.10.9_py36 Website The Maastricht Diffusion Toolbox, MDT, is a framework and library for parallelized (GPU and multi-core CPU) diffusion Magnetic Resonance Imaging (MRI) modeling. neurology py-nipype 1.1.3_py271.1.3_py36 Website Nipype is a Python project that provides a uniform interface to existing neuroimaging software and facilitates interaction between these packages within a single workflow. neurology spm 12 Website The SPM software package has been designed for the analysis of brain imaging data sequences. The sequences can be a series of images from different cohorts, or time-series from the same subject. neurology workbench 1.3.1 Website Connectome Workbench is an open source, freely available visualization and discovery tool used to map neuroimaging data, especially data generated by the Human Connectome Project. pathology openslide 3.4.1 Website OpenSlide is a C library that provides a simple interface to read whole-slide images (also known as virtual slides). 
pathology py-openslide-python 1.1.1_py27 1.1.1_py36 Website OpenSlide Python is a Python interface to the OpenSlide library. phylogenetics py-ete 3.0.0_py27 Website A Python framework for the analysis and visualization of trees. population genetics py-admixfrog 0.6.1_py36 Website Admixfrog is a HMM to infer ancestry frogments (fragments) from low-coverage, contaminated data. radiology nbia-data-retriever 4.2 Website The NBIA Data Retriever is an application to download radiology images from the TCIA Radiology Portal. workflow management nextflow 23.04.3 Website Nextflow is a bioinformatics workflow manager that enables the development of portable and reproducible workflows."},{"location":"docs/software/list/#chemistry","title":"chemistry","text":"Field Module\u00a0name Version(s) URL Description cheminformatics py-rdkit 2018.09.1_py27 2018.09.1_py362022.09.1_py39 Website RDKit is a collection of cheminformatics and machine-learning software written in C++ and Python. computational chemistry gaussian g16.A03 g16.B01 Website Gaussian is a general purpose computational chemistry software package. computational chemistry libint 1.1.42.0.32.6.0 Website Libint computes molecular integrals. computational chemistry libxc 3.0.05.2.2 Website Libxc is a library of exchange-correlation functionals for density-functional theory. computational chemistry nwchem 6.8 7.0.2 Website NWChem is an ab initio computational chemistry software package which also includes quantum chemical and molecular dynamics functionality. computational chemistry py-ase 3.14.1_py273.22.1_py39 Website The Atomic Simulation Environment (ASE) is a set of tools and Python modules for setting up, manipulating, running, visualizing and analyzing atomistic simulations. computational chemistry schrodinger 2021-1 2017-3 2018-1 2018-2 2019-2 2020-2 2022-3 2024-1 Website Schr\u00f6dinger Suites (Small-molecule Drug Discovery Suite, Material Science Suite, Biologics Suite) provide a set of molecular modelling software. computational chemistry vasp 5.4.1 6.1.1 6.3.2 6.4.1 Website The Vienna Ab initio Simulation Package (VASP) is a computer program for atomic scale materials modelling, e.g. electronic structure calculations and quantum-mechanical molecular dynamics, from first principles. crystallography clipper 2.1.20180802 Website Crystallographic automation and complex data manipulation libraries. crystallography mmdb2 2.0.20 Website A C++ toolkit for working with macromolecular coordinate files. crystallography ssm 1.4 Website A macromolecular superposition library. crystallography vesta 3.4.4 Website VESTA is a 3D visualization program for structural models, volumetric data such as electron/nuclear densities, and crystal morphologies. docking gnina 1.0.2 Website A deep learning framework for molecular docking electrostatics apbs 1.5 Website APBS solves the equations of continuum electrostatics for large biomolecular assemblages. molecular dynamics gromacs 2016.3 2018 2021.3 2023.1 Website GROMACS is a versatile package to perform molecular dynamics, i.e. simulate the Newtonian equations of motion for systems with hundreds to millions of particles. molecular dynamics lammps 20180316 20200303 20230802 Website LAMMPS is a classical molecular dynamics code that models an ensemble of particles in a liquid, solid, or gaseous state. molecular dynamics openmm 7.1.1 Website A high performance toolkit for molecular simulation. 
molecular dynamics plumed 2.3.2 Website PLUMED is an open source library for free energy calculations in molecular systems. molecular dynamics py-raspa2 2.0.3_py27 Website RASPA2 is a general purpose classical simulation package that can be used for the simulation of molecules in gases, fluids, zeolites, aluminosilicates, metal-organic frameworks, carbon nanotubes and external fields. molecular dynamics qbox 1.65.0 Website Qbox is a First-Principles Molecular Dynamics code. molecular dynamics quip 20170901 20220426 Website The QUIP package is a collection of software tools to carry out molecular dynamics simulations. quantum chemistry cp2k 4.1 9.1 Website CP2K is a quantum chemistry and solid state physics software package that can perform atomistic simulations of solid state, liquid, molecular, periodic, material, crystal, and biological systems. quantum chemistry ocean 2.9.7 Website OCEAN is a versatile and user-friendly package for calculating core edge spectroscopy including excitonic effects. quantum chemistry orca 4.2.1 5.0.0 5.0.3 Website ORCA is a flexible, efficient and easy-to-use general purpose tool for quantum chemistry. quantum chemistry quantum-espresso 6.2.1 6.6 7.0 7.1 Website Quantum ESPRESSO is an integrated suite of Open-Source computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials. quantum chemistry quantum-espresso_gpu 1.1 7.0 7.1 Website Quantum ESPRESSO is an integrated suite of Open-Source computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials. quantum chemistry terachem 1.95A 1.96H-beta Website TeraChem is general purpose quantum chemistry software designed to run on NVIDIA GPU architectures. tools openbabel 3.1.1 Website Open Babel is a chemical toolbox designed to speak the many languages of chemical data. tools py-openbabel 3.1.1.1_py39 Website Python bindings for Open Babel."},{"location":"docs/software/list/#devel","title":"devel","text":"Field Module\u00a0name Version(s) URL Description build bazel 0.16.10.26.10.29.1 Website Bazel is a fast, scalable, multi-language and extensible build system. build bazelisk 1.3.01.8.0 Website Bazelisk is a wrapper for Bazel written in Go. build binutils 2.38 Website The GNU Binutils are a collection of binary tools. build cmake 3.8.13.11.13.13.13.20.33.24.2 Website CMake is an extensible, open-source system that manages the build process in an operating system and in a compiler-independent manner. build kerl 1.8.5 Website Kerl is a tool to easily build and install Erlang/OTP instances. build make 4.4 Website GNU Make is a tool which controls the generation of executables and other non-source files of a program from the program's source files. build ninja 1.9.0 Website Ninja is a small build system with a focus on speed. build py-meson 0.51.1_py36 Website Meson is an open source build system meant to be both extremely fast, and, even more importantly, as user friendly as possible. build py-scons 3.0.5_py273.0.5_py364.7.0_py312 Website SCons is an Open Source software construction tool. compiler aocc 2.1.02.2.0 Website AMD Optimizing C/C++ Compiler - AOCC is a highly optimized C, C++ and Fortran compiler for x86 targets especially for Zen based AMD processors. 
compiler gcc 6.3.0 7.1.0 7.3.0 8.1.0 9.1.0 10.1.0 10.3.0 12.1.0 Website The GNU Compiler Collection includes front ends for C, C++, Fortran, Java, and Go, as well as libraries for these languages (libstdc++, libgcj,...). compiler icc 2017.u2 2018.u1 2018 2019 Website Intel C++ Compiler, also known as icc or icl, is a group of C and C++ compilers from Intel. compiler ifort 2017.u2 2018.u1 2018 2019 Website Intel Fortran Compiler, also known as ifort, is a group of Fortran compilers from Intel. compiler llvm 7.0.0 3.8.1 4.0.0 5.0.0 9.0.1 15.0.3 Website The LLVM Project is a collection of modular and reusable compiler and toolchain technologies. Clang is an LLVM native C/C++/Objective-C compiler. compiler nvhpc 21.5 21.7 22.3 23.3 Website NVIDIA HPC Software Development Kit (SDK) including C, C++, and Fortran compilers. compiler pgi 19.10 Website PGI compilers and tools, including Open MPI (Community Edition). compiler smlnj 110.81 Website Standard ML of New Jersey (abbreviated SML/NJ) is a compiler for the Standard ML '97 programming language. data h5utils 1.12.1 Website h5utils is a set of utilities for visualization and conversion of scientific data in the free, portable HDF5 format. data hdf5 1.10.6 1.10.0p1 1.10.2 1.12.0 1.12.2 Website HDF5 is a data model, library, and file format for storing and managing data. It supports an unlimited variety of datatypes, and is designed for flexible and efficient I/O and for high volume and complex data. data hiredis 0.13.3 Website Hiredis is a minimalistic C client library for the Redis database. data ncl 6.4.0 6.6.2 Website NCL is a free interpreted language designed specifically for scientific data processing and visualization. data nco 4.8.0 5.0.6 Website The NCO toolkit manipulates and analyzes data stored in netCDF-accessible formats. data netcdf 4.4.1.1 4.8.1 Website NetCDF is a set of software libraries and self-describing, machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. data netcdf-c 4.9.0 Website NetCDF is a set of software libraries and self-describing, machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. This module provides C libraries. data netcdf-cxx 4.3.1 Website NetCDF is a set of software libraries and self-describing, machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. This module provides C++ libraries. data netcdf-fortran 4.5.4 Website NetCDF is a set of software libraries and self-describing, machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data. This module provides Fortran libraries. data pnetcdf 1.8.1 1.12.3 Website Parallel netCDF (PnetCDF) is a parallel I/O library for accessing NetCDF files in CDF-1, 2, and 5 formats. data protobuf 3.4.0 3.20.0 21.9 Website Protocol Buffers (a.k.a., protobuf) are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data. data py-pandas 0.23.0_py27 0.23.0_py36 1.0.3_py36 1.3.1_py39 2.0.1_py39 2.2.1_py312 Website pandas is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language. data py-protobuf 3.4.0_py27 3.4.0_py36 3.6.1_py27 3.6.1_py36 3.15.8_py36 3.20.1_py39 4.21.9_py39 Website Python bindings for Google's Protocol Buffers data interchange format.
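As a small illustration of the py-pandas modules listed above (the toy table below is made up for the example):

```python
# py-pandas sketch: build a small table, then group and aggregate it.
import pandas as pd

df = pd.DataFrame({"field": ["mpi", "mpi", "lib"], "count": [3, 2, 5]})
print(df.groupby("field")["count"].sum())   # totals per field
```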
data redis 4.0.1 Website Redis is an open source, in-memory data structure store, used as a database, cache and message broker. data zfp 1.0.0 Website zfp is an open-source library for compressed floating-point and integer arrays that support high throughput read and write random access. data analytics hadoop 3.1.0 3.3.1 Website The Apache Hadoop software library is a framework that allows for the distributed processing of large data sets across clusters of computers using simple programming models. data analytics py-sparkhpc 0.3_py27 Website Launching and controlling spark on HPC clusters. data analytics spark 2.3.0 3.2.1 Website Apache Spark\u2122 is a unified analytics engine for large-scale data processing. debug gdb 8.2.1 Website GDB is the GNU Project debugger. debug valgrind 3.14.0 Website Valgrind is an instrumentation framework for building dynamic analysis tools. engine v8 8.4.371.22 Website V8 is Google\u2019s open source high-performance JavaScript and WebAssembly engine, written in C++. framework dotnet 2.1.500 6.0.413 Website .NET is a free, cross-platform, open source developer platform for building many different types of applications. framework ga 5.8.2 Website Global Arrays (GA) is a Partitioned Global Address Space (PGAS) programming model. framework py-kedro 0.18.0_py39 Website Kedro is an open-source Python framework for creating reproducible, maintainable and modular data science code. IDE code-server 4.16.1 Website Run VS Code on any machine anywhere and access it in the browser. IDE py-jupytext 1.16.1_py39 Website Jupyter Notebooks as Markdown Documents, Julia, Python or R scripts. language cuda 9.0.176 8.0.61 9.1.85 9.2.88 9.2.148 10.0.130 10.1.105 10.1.168 10.2.89 11.0.3 11.1.1 11.2.0 11.3.1 11.4.1 11.5.0 11.7.1 12.0.0 12.1.1 12.2.0 12.4.0 Website CUDA is a parallel computing platform and application programming interface (API) model created by Nvidia. It allows software developers and software engineers to use a CUDA-enabled graphics processing unit (GPU) for general purpose processing. language erlang 21.3 Website Erlang is a programming language used to build massively scalable soft real-time systems with requirements on high availability. language gcl 2.6.14 Website GCL is the official Common Lisp for the GNU project. language go 1.9 1.14 1.18.2 Website Go is an open source programming language that makes it easy to build simple, reliable, and efficient software. language guile 2.0.11 2.2.2 Website GNU Guile is the preferred extension system for the GNU Project, which features an implementation of the Scheme programming language. language haskell 8.6.5 Website Haskell is a statically typed, purely functional programming language with type inference and lazy evaluation. language java 1.8.0_131 11.0.11 12.0.2 17.0.4 18.0.2 Website Java is a general-purpose computer programming language that is concurrent, class-based, object-oriented, and specifically designed to have as few implementation dependencies as possible. language julia 1.3.1 1.4.0 1.5.1 1.6.2 1.7.2 1.8.4 1.9.0 1.10.0 Website Julia is a high-level, high-performance dynamic programming language for numerical computing. language lua 5.3.4 Website Lua is a powerful, efficient, lightweight, embeddable scripting language. It supports procedural programming, object-oriented programming, functional programming, data-driven programming, and data description. language luarocks 2.4.3 Website LuaRocks is the package manager for Lua modules.
language manticore 20180301 Website Manticore is a high-level parallel programming language aimed at general-purpose applications running on multi-core processors. language nodejs 8.9.4 9.5.0 16.13.0 18.15.0 Website Node.js is a JavaScript runtime built on Chrome's V8 JavaScript engine. It provides the npm package manager. language perl 5.26.0 5.36.1 Website Perl 5 is a highly capable, feature-rich programming language with over 29 years of development. language php 7.3.0 Website PHP (recursive acronym for PHP: Hypertext Preprocessor) is an open source general-purpose scripting language that is especially suited for web development. language py-cython 0.27.3_py27 0.27.3_py36 0.29.21_py36 0.29.28_py39 Website Cython is an optimising static compiler for both the Python programming language and the extended Cython programming language (based on Pyrex). language py-ipython 5.4.1_py27 6.1.0_py36 8.3.0_py39 8.22.2_py312 Website IPython is a command shell for interactive computing in multiple programming languages, originally developed for the Python programming language. language py-jupyter 1.0.0_py27 1.0.0_py36 1.0.0_py39 Website Jupyter is a browser-based interactive notebook for programming, mathematics, and data science. It supports a number of languages via plugins. language py-jupyterlab 2.3.2_py36 4.0.8_py39 Website Jupyter is a browser-based interactive notebook for programming, mathematics, and data science. It supports a number of languages via plugins. language python 2.7.13 3.6.1 3.9.0 3.12.1 Website Python is an interpreted, interactive, object-oriented programming language. language ruby 2.4.1 2.7.1 3.1.2 Website A dynamic, open source programming language with a focus on simplicity and productivity. It has an elegant syntax that is natural to read and easy to write. language rust 1.35.0 1.56.1 1.63.0 1.72.0 Website A language empowering everyone to build reliable and efficient software. language scala 2.12.6 Website Scala combines object-oriented and functional programming in one concise, high-level language. lib ant 1.10.1 Website Apache Ant is a Java library and command-line tool whose mission is to drive processes described in build files as targets and extension points dependent upon each other. lib boost 1.64.0 1.69.0 1.75.0 1.76.0 1.79.0 Website Boost is a set of libraries for the C++ programming language that provide support for tasks and structures such as linear algebra, pseudorandom number generation, multithreading, image processing, regular expressions, and unit testing. lib chai 2.2.2 Website Copy-hiding array abstraction to automatically migrate data between memory spaces. lib cnmem 1.0.0 Website CNMeM is a simple library to help the Deep Learning frameworks manage CUDA memory. lib conduit 0.5.1 Website Simplified Data Exchange for HPC Simulations. lib cub 1.7.3 1.10.0 Website CUB is a flexible library of cooperative threadblock primitives and other utilities for CUDA kernel programming. lib cutlass 0.1.0 3.1.0 Website CUTLASS is a collection of CUDA C++ template abstractions for implementing high-performance matrix-multiplication (GEMM) at all levels and scales within CUDA. lib dtcmp 1.1.3 Website Datatype Compare (DTCMP) Library for sorting and ranking distributed data using MPI. lib eigen 3.3.3 3.4.0 Website Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms. lib libcircle 0.3.0 Website libcircle is an API for distributing embarrassingly parallel workloads using self-stabilization.
lib libctl 3.2.2 4.0.1 4.5.0 Website libctl is a library for supporting flexible control files in scientific simulations. lib libevent 2.1.12 Website The libevent API provides a mechanism to execute a callback function when a specific event occurs on a file descriptor or after a timeout has been reached. lib libgpuarray 0.7.5 Website Library to manipulate tensors on the GPU. lib libtree 2.0.0 Website libtree prints shared object dependencies as a tree. lib lwgrp 1.0.4 Website The Light-weight Group Library provides methods for MPI codes to quickly create and destroy process groups. lib nccl 1.3.4 2.0.4 2.1.15 2.2.13 2.3.7 2.4.8 2.5.6 2.8.4 2.11.4 2.17.1 2.20.5 Website NCCL (pronounced 'Nickel') is a stand-alone library of standard collective communication routines, such as all-gather, reduce, broadcast, etc., that have been optimized to achieve high bandwidth over PCIe. lib pugixml 1.12.1 Website Light-weight, simple and fast XML parser for C++ with XPath support. lib py-cutlass 3.1.0_py39 Website Python interface for CUTLASS. lib py-h5py 2.7.1_py27 2.8.0_py36 2.10.0_py36 3.1.0_py36 3.7.0_py39 3.10.0_py312 Website The h5py package is a Pythonic interface to the HDF5 binary data format. lib py-netcdf4 1.3.1_py27 1.3.1_py36 Website netcdf4-python is a Python interface to the netCDF C library. lib py-nose 1.3.7_py39 Website nose is nicer testing for python. lib py-numba 0.35.0_py27 0.35.0_py36 0.53.1_py36 0.54.1_py39 Website Numba is a compiler for Python array and numerical functions that gives you the power to speed up your applications with high performance functions written directly in Python. lib py-parsl 1.2.0_py39 Website Parsl is a flexible and scalable parallel programming library for Python. lib py-pycuda 2017.1.1_py27 2021.1_py36 Website PyCUDA lets you access Nvidia\u2019s CUDA parallel computation API from Python. lib py-rmm 23.04.00_py39 Website Python interface for RMM. lib py-schwimmbad 0.3.1_py36 0.3.2_py39 Website schwimmbad provides a uniform interface to parallel processing pools and enables switching easily between local development (e.g., serial processing or with multiprocessing) and deployment on a cluster or supercomputer (via, e.g., MPI or JobLib). lib py-scikit-image 0.13.0_py27 0.14.0_py27 0.15.0_py27 0.15.0_py36 0.17.2_py36 0.19.3_py39 0.20.0_py39 Website scikit-image is a collection of algorithms for image processing. lib rabbitmq 3.7.13 Website RabbitMQ is an open-source message broker. lib raja 0.12.1 Website Collection of C++ software abstractions that enable architecture portability for HPC applications. lib rmm 23.04.00 Website RAPIDS Memory Manager library. lib swig 3.0.12 Website SWIG is an interface compiler that connects programs written in C and C++ with scripting languages such as Perl, Python, Ruby, and Tcl. lib tbb 2017.u2 2018.u1 2018 2019 Website Intel\u00ae Threading Building Blocks (Intel\u00ae TBB) is a widely used C++ library for shared-memory parallel programming and heterogeneous computing (intra-node distributed memory programming). lib trilinos 12.12.1 Website Trilinos is a collection of open-source software libraries, called packages, intended to be used as building blocks for the development of scientific applications. lib xsimd 7.6.0 8.1.0 Website C++ wrappers for SIMD intrinsics and parallelized, optimized mathematical functions (SSE, AVX, NEON, AVX512). lib zeromq 4.2.2 Website ZeroMQ (also spelled \u00d8MQ, 0MQ or ZMQ) is a high-performance asynchronous messaging library, aimed at use in distributed or concurrent applications.
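A minimal sketch of the py-h5py interface listed above (the file and dataset names are arbitrary):

```python
# py-h5py sketch: write an HDF5 dataset, then read part of it back.
import h5py
import numpy as np

with h5py.File("demo.h5", "w") as f:
    f.create_dataset("x", data=np.arange(10))

with h5py.File("demo.h5", "r") as f:
    print(f["x"][:5])   # first five elements
```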
mpi hpcx 2.6.0 2.7.0 2.8.1 Website Mellanox HPC-X toolkit is a comprehensive software package that includes MPI and SHMEM/PGAS communications libraries. mpi impi 2017.u2 2018.u1 2018 2019 Website Intel\u00ae MPI Library is a multi-fabric message passing library that implements the Message Passing Interface, version 3.1 (MPI-3.1) specification. mpi openmpi 4.1.2 2.0.2 2.1.1 3.1.2 4.0.3 4.0.5 4.1.0 4.1.6 Website The Open MPI Project is an open source Message Passing Interface implementation that is developed and maintained by a consortium of academic, research, and industry partners. mpi py-mpi4py 3.0.0_py27 3.0.3_py36 3.1.3_py39 3.1.5_py312 Website MPI for Python provides Python bindings for the Message Passing Interface (MPI) standard. It is implemented on top of the MPI-1/2/3 specification and exposes an API which grounds on the standard MPI-2 C++ bindings. networking gasnet 1.30.0 Website GASNet is a language-independent, low-level networking layer that provides network-independent, high-performance communication primitives tailored for implementing parallel global address space SPMD languages and libraries. networking libfabric 1.6.0 1.6.2 1.7.1 1.9.1 1.10.1 1.11.1 1.14.0 Website The Open Fabrics Interfaces (OFI) is a framework focused on exporting fabric communication services to applications. Libfabric is the library that defines and exports the user-space API of OFI. networking py-ucx-py 0.24.0_py39 Website Python bindings for UCX. networking ucx 1.3.1 1.8.1 1.9.0 1.10.0 1.12.1 1.15.0 Website UCX is a communication library implementing high-performance messaging for MPI/PGAS frameworks. parser antlr 2.7.7 Website ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. parser xerces-c 3.2.1 Website Xerces-C++ is a validating XML parser written in a portable subset of C++. profiling amd-uprof 3.3.462 Website AMD uProf is a performance analysis tool for applications. profiling darshan 3.4.4 Website Darshan is a scalable HPC I/O characterization tool. runtime starpu 1.3.2 Website StarPU is a unified runtime system that offers support for heterogeneous multicore architectures."},{"location":"docs/software/list/#math","title":"math","text":"Field Module\u00a0name Version(s) URL Description computational geometry cgal 4.10 Website The Computational Geometry Algorithms Library (CGAL) is a C++ library that aims to provide easy access to efficient and reliable algorithms in computational geometry. computational geometry dealii 9.4.1 Website deal.II is a C++ program library targeted at the computational solution of partial differential equations using adaptive finite elements. computational geometry gmsh 4.10.1 Website Gmsh is an open source 3D finite element mesh generator with a built-in CAD engine and post-processor. computational geometry opencascade 7.6.2 Website Open CASCADE Technology (OCCT) is an open-source full-scale 3D geometry library. computational geometry polymake 4.10 Website polymake is open source software for research in polyhedral geometry. computational geometry qhull 2015.2 Website Qhull computes the convex hull, Delaunay triangulation, Voronoi diagram, halfspace intersection about a point, furthest-site Delaunay triangulation, and furthest-site Voronoi diagram. computational geometry silo 4.11 Website A mesh and field I/O library and scientific database.
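As a quick illustration of the py-mpi4py bindings listed above, a minimal hello-world sketch (launcher name and flags depend on the MPI module loaded):

```python
# py-mpi4py sketch: report each rank; run under MPI, e.g.
#   mpirun -n 4 python hello.py
from mpi4py import MPI

comm = MPI.COMM_WORLD
print(f"rank {comm.Get_rank()} of {comm.Get_size()}")
```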
deep learning cudnn 6.0 7.0.1 7.0.4 7.0.5 7.1.4 7.4.1.5 7.6.4 7.6.5 8.1.1.33 8.3.3.40 8.6.0.163 8.9.0.131 9.0.0.312 Website NVIDIA cuDNN is a GPU-accelerated library of primitives for deep neural networks. deep learning cutensor 1.2.0 1.5.0.3 Website GPU-accelerated tensor linear algebra library. deep learning py-gym 0.21.0_py39 Website Gym is a toolkit for developing and comparing reinforcement learning algorithms. deep learning py-horovod 0.12.1_py27 0.12.1_py36 Website Horovod is a distributed training framework for TensorFlow. The goal of Horovod is to make distributed Deep Learning fast and easy to use. deep learning py-keras 2.1.5_py27 2.0.8_py27 2.1.5_py36 2.2.4_py27 2.2.4_py36 2.3.1_py36 Website Keras is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano. deep learning py-onnx 1.0.1_py271.8.1_py361.12.0_py39 Website ONNX is a open format to represent deep learning models. deep learning py-pytorch 0.3.0_py27 0.2.0_py27 0.2.0_py36 0.3.0_py36 1.0.0_py27 1.0.0_py36 1.4.0_py36 1.6.0_py36 1.8.1_py39 1.11.0_py39 2.0.0_py39 2.2.1_py312 Website PyTorch is a deep learning framework that puts Python first. deep learning py-tensorboardx 1.8_py27 Website TensorboardX is TensorBoard\u2122 for PyTorch (and Chainer, MXNet, NumPy...) deep learning py-tensorflow 2.1.0_py36 1.4.0_py27 1.5.0_py27 1.5.0_py36 1.9.0_py27 1.9.0_py36 2.4.1_py36 2.6.2_py36 2.9.1_py39 2.10.0_py39 Website TensorFlow\u2122 is an open source software library for numerical computation using data flow graphs. deep learning py-tensorlayer 1.6.3_py27 Website TensorLayer is a Deep Learning (DL) and Reinforcement Learning (RL) library extended from Google TensorFlow. deep learning py-tensorrt 8.5.1.7_py39 10.0.1_py312 Website Python bindings for the TensorRT library. deep learning py-theano 1.0.1_py27 Website Theano is a Python library that allows you to define, optimize, and evaluate mathematical expressions involving multi-dimensional arrays efficiently. deep learning py-torchvision 0.15.1_py390.17.1_py312 Website Datasets, model architectures, and common image transformations for computer vision for PyTorch. deep learning py-triton 1.0.0_py39 Website Triton is a language and compiler for writing highly efficient custom Deep-Learning primitives. deep learning tensorrt 3.0.1 3.0.4 4.0.1.6 5.0.2.6 6.0.1.8 7.0.0.11 7.2.3.4 8.5.1.7 10.0.1.6 Website NVIDIA TensorRT\u2122 is a high-performance deep learning inference optimizer and runtime that delivers low latency, high-throughput inference for deep learning applications. deep learning torch 20180202 Website Torch is a scientific computing framework with wide support for machine learning algorithms that puts GPUs first. graph computing bliss 0.73 Website A tool for computing automorphism groups and canonical forms of graphs. lib opencv 3.3.0 4.5.2 4.5.5 4.7.0 4.9.0 Website OpenCV (Open Source Computer Vision Library) is an open source computer vision and machine learning software library. linear algebra armadillo 8.200.1 Website Armadillo is a high quality linear algebra library (matrix maths) for the C++ language, aiming towards a good balance between speed and ease of use. linear algebra cusparselt 0.2.0.1 Website NVIDIA cuSPARSELt is a high-performance CUDA library for sparse matrix-matrix multiplication. machine learning py-accelerate 0.29.3_py312 Website Huggingface Accelerate is a library that enables the same PyTorch code to be run across any distributed configuration. 
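A tiny sketch of the py-pytorch modules listed above (random data, just to show a forward and backward pass):

```python
# py-pytorch sketch: one forward/backward pass through a linear layer.
import torch

model = torch.nn.Linear(3, 1)
x = torch.randn(8, 3)                 # a batch of 8 random samples
loss = model(x).pow(2).mean()
loss.backward()                       # gradients now populate model.*.grad
print(loss.item(), model.weight.grad.shape)
```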
machine learning py-datasets 2.18.0_py312 Website Hugging Face Datasets is a library for easily accessing and sharing datasets for Audio, Computer Vision, and Natural Language Processing (NLP) tasks. machine learning py-huggingface-hub 0.22.1_py312 Website The huggingface_hub library allows you to interact with the Hugging Face Hub, a machine learning platform for creators and collaborators. machine learning py-kaolin 0.15.0_py39 Website A PyTorch Library for Accelerating 3D Deep Learning Research. machine learning py-safetensors 0.4.2_py312 Website Simple, safe way to store and distribute tensors. machine learning py-scikit-learn 0.19.1_py27 0.19.1_py36 0.24.2_py36 1.0.2_py39 1.3.2_py39 Website Scikit-learn is a free software machine learning library for the Python programming language. machine learning py-tinygrad 0.8.0_py312 Website tinygrad is a deep learning framework that aims to provide a balance between simplicity and functionality. machine learning py-tokenizers 0.15.2_py312 Website Hugging Face Tokenizers provides an implementation of today\u2019s most used tokenizers, with a focus on performance and versatility. machine learning py-torch-nvidia-apex 23.08_py312 Website A PyTorch Extension: Tools for easy mixed precision and distributed training in Pytorch. machine learning py-torchtune 0.1.1_py312 Website torchtune is a PyTorch-native library for easily authoring, fine-tuning and experimenting with LLMs. machine learning py-transformers 4.39.1_py312 Website Hugging Face Transformers provides APIs and tools to easily download and train state-of-the-art pretrained models. numerical analysis matlab R2022b R2017a R2017b R2018a R2019a R2020a R2024a Website MATLAB is a multi-paradigm numerical computing environment and proprietary programming language developed by MathWorks. numerical analysis octave 4.2.1 Website GNU Octave is a high-level language primarily intended for numerical computations. numerical library arpack 3.5.0 3.7.0 3.9.0 Website Collection of Fortran77 subroutines designed to solve large scale eigenvalue problems. numerical library blis 2.1 2.2.4 3.1.0 Website BLIS is a portable software framework for instantiating high-performance BLAS-like dense linear algebra libraries. numerical library fftw 2.1.5 3.3.6 3.3.8 3.3.9 3.3.10 Website The Fastest Fourier Transform in the West (FFTW) is a software library for computing discrete Fourier transforms (DFTs). numerical library flexiblas 3.1.3 Website FlexiBLAS is a BLAS and LAPACK wrapper library with runtime exchangeable backends. numerical library flint 2.9.0 Website FLINT is a C library for doing number theory. numerical library glpk 4.63 Website The GLPK (GNU Linear Programming Kit) package is intended for solving large-scale linear programming (LP), mixed integer programming (MIP), and other related problems. numerical library gmp 6.1.2 6.2.1 Website GMP is a free library for arbitrary precision arithmetic, operating on signed integers, rational numbers, and floating-point numbers. numerical library gsl 1.16 2.3 2.7 Website The GNU Scientific Library (GSL) is a numerical library for C and C++ programmers. The library provides a wide range of mathematical routines such as random number generators, special functions and least-squares fitting. numerical library harminv 1.4.1 Website harminv is a program designed to solve the problem of harmonic inversion: given a time series consisting of a sum of sinusoids (modes), extract their frequencies and amplitudes.
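As a small illustration of the py-scikit-learn modules listed above (using the bundled iris toy dataset):

```python
# py-scikit-learn sketch: fit a classifier on a built-in toy dataset.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)
print(clf.score(X, y))   # training accuracy
```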
numerical library hypre 2.20.0 Website HYPRE is a library of high performance preconditioners and solvers featuring multigrid methods for the solution of large, sparse linear systems of equations on massively parallel computers. numerical library imkl 2017.u2 2018.u1 2018 2019 Website Intel Math Kernel Library (Intel MKL) is a library of optimized math routines for science, engineering, and financial applications. Core math functions include BLAS, LAPACK, ScaLAPACK, sparse solvers, fast Fourier transforms, and vector math. The routines in MKL are hand-optimized specifically for Intel processors. numerical library libflame 2.1 2.2.4 3.1.0 Website libflame is a portable library for dense matrix computations, providing much of the functionality present in LAPACK. numerical library libxsmm 1.8.1 1.17 Website LIBXSMM is a library for small dense and small sparse matrix-matrix multiplications as well as for deep learning primitives such as small convolutions. numerical library metis 5.1.0 Website METIS is a set of serial programs for partitioning graphs, partitioning finite element meshes, and producing fill reducing orderings for sparse matrices. numerical library mpc 1.2.1 Website GNU MPC is a C library for the arithmetic of complex numbers with arbitrarily high precision and correct rounding of the result. numerical library mpfr 3.1.5 4.1.0 Website The MPFR library is a C library for multiple-precision floating-point computations with correct rounding. numerical library mumps 5.1.2 Website A parallel sparse direct solver. numerical library openblas 0.3.10 0.2.19 0.3.4 0.3.9 0.3.20 0.3.26 Website OpenBLAS is an optimized BLAS library. numerical library parmetis 4.0.3 Website ParMETIS is an MPI-based parallel library that implements a variety of algorithms for partitioning unstructured graphs, meshes, and for computing fill-reducing orderings of sparse matrices. numerical library petsc 3.10.3 3.18.5 Website PETSc, the Portable, Extensible Toolkit for Scientific Computation, is a suite of data structures and routines for the scalable (parallel) solution of scientific applications modeled by partial differential equations. numerical library py-autograd 1.0_py39 Website Autograd can automatically differentiate native Python and Numpy code. numerical library py-cupy 7.8.0_py36 10.2.0_py39 12.1.0_py39 Website CuPy is an implementation of NumPy-compatible multi-dimensional array on CUDA. numerical library py-gmpy2 2.0.8_py36 Website gmpy2 is a C-coded Python extension module that supports multiple-precision arithmetic. numerical library py-jax 0.4.7_py39 Website JAX is Autograd and XLA, brought together for high-performance numerical computing. numerical library py-jaxlib 0.4.7_py39 Website XLA library for Jax. numerical library py-numpy 1.14.3_py27 1.14.3_py36 1.17.2_py36 1.18.1_py36 1.19.2_py36 1.20.3_py39 1.24.2_py39 1.26.3_py312 Website NumPy is the fundamental package for scientific computing with Python. numerical library py-petsc4py 3.18.5_py39 Website Python bindings for PETSc, the Portable, Extensible Toolkit for Scientific Computation. numerical library py-psbody-mesh 0.4_py39 Website The MPI-IS Mesh Processing Library contains core functions for manipulating meshes and visualizing them. numerical library py-pyublas 2017.1_py27 Website PyUblas provides a seamless glue layer between Numpy and Boost.Ublas for use with Boost.Python. numerical library py-pywavelets 1.6.0_py39 1.6.0_py312 Website PyWavelets is a free Open Source library for wavelet transforms in Python.
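A minimal sketch of the py-numpy modules listed above (a made-up 2x2 system, just to show the linear algebra routines):

```python
# py-numpy sketch: solve a small linear system A x = b and verify it.
import numpy as np

A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([9.0, 8.0])
x = np.linalg.solve(A, b)
print(x, np.allclose(A @ x, b))   # solution and a residual check
```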
numerical library py-scipy 1.1.0_py27 1.1.0_py36 1.4.1_py36 1.6.3_py39 1.10.1_py39 1.12.0_py312 Website The SciPy library provides many user-friendly and efficient numerical routines such as routines for numerical integration and optimization. numerical library py-slepc4py 3.18.2_py39 Website Python bindings for SLEPc. numerical library py-tabmat 3.1.2_py39 Website Efficient matrix representations for working with tabular data. numerical library qrupdate 1.1.2 Website qrupdate is a Fortran library for fast updates of QR and Cholesky decompositions. numerical library scalapack 2.0.2 2.1 2.2.0 Website ScaLAPACK is a library of high-performance linear algebra routines for parallel distributed memory machines. numerical library scotch 6.0.4 Website Software package and libraries for sequential and parallel graph partitioning, static mapping and clustering, sequential mesh and hypergraph partitioning, and sequential and parallel sparse matrix block ordering. numerical library slepc 3.18.2 Website SLEPc is a Scalable Library for Eigenvalue Problem Computations. numerical library suitesparse 7.4.0 Website SuiteSparse is a suite of sparse matrix algorithms. numerical library superlu 5.2.1 Website SuperLU is a general purpose library for the direct solution of large, sparse, nonsymmetric systems of linear equations. numerical library tetgen 1.6.0 Website TetGen provides various features to generate good quality and adaptive tetrahedral meshes suitable for numerical methods, such as finite element or finite volume methods. numerical library xblas 1.0.248 Website Extra precise basic linear algebra subroutines. optimization gurobi 7.5.1 8.0.1_py27 8.0.1_py36 9.0.3_py36 10.0.1_py39 Website The Gurobi Optimizer is a commercial optimization solver for mathematical programming. optimization knitro 10.3.0 12.4.0 Website Artelys Knitro is an optimization solver for difficult large-scale nonlinear problems. optimization nlopt 2.6.2 Website NLopt is a free/open-source library for nonlinear optimization. optimization octeract 3.3.0 Website Octeract Engine is a proprietary massively parallel deterministic global optimization solver for general Mixed-Integer Nonlinear Programs (MINLP). optimization py-optuna 2.10.0_py39 Website Optuna is an automatic hyperparameter optimization software framework, particularly designed for machine learning. optimization sundials 6.4.1 Website SUNDIALS is a family of software packages providing robust and efficient time integrators and nonlinear solvers that can easily be incorporated into existing simulation codes. scientific computing py-scipystack 1.0_py27 1.0_py36 Website The SciPy Stack is a collection of open source software for scientific computing in Python. It provides the following packages: numpy, scipy, matplotlib, ipython, jupyter, pandas, sympy and nose. statistics datamash 1.3 Website GNU datamash is a command-line program which performs basic numeric, textual and statistical operations on input textual data files. statistics jags 4.3.0 4.3.1 Website Just another Gibbs sampler (JAGS) is a program for simulation from Bayesian hierarchical models using Markov chain Monte Carlo (MCMC). statistics py-emcee 3.1.4_py39 Website The Python ensemble sampling toolkit for affine-invariant MCMC. statistics py-glum 2.1.2_py39 Website glum is a fast, modern, Python-first GLM estimation library. statistics py-rpy2 2.8.6_py27 2.9.2_py36 Website rpy2 is an interface to R running embedded in a Python process.
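As a quick illustration of the py-scipy optimization routines mentioned above (the objective function is a made-up quadratic):

```python
# py-scipy sketch: minimize a simple function with scipy.optimize.
from scipy.optimize import minimize

res = minimize(lambda v: (v[0] - 1.0) ** 2 + (v[1] + 2.0) ** 2,
               x0=[0.0, 0.0])
print(res.x)   # converges to approximately [1, -2]
```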
statistics R 4.2.0 3.4.0 3.5.1 3.6.1 4.0.2 4.1.2 4.3.2 Website R is a free software environment for statistical computing and graphics. statistics rstudio 1.3.1093 2023.09.1 Website RStudio is an integrated development environment (IDE) for R. It includes a console, syntax-highlighting editor that supports direct code execution, as well as tools for plotting, history, debugging and workspace management. statistics rstudio-desktop 2022.02.2-485 Website RStudio is an integrated development environment (IDE) for R. It includes a console, syntax-highlighting editor that supports direct code execution, as well as tools for plotting, history, debugging and workspace management. This is the X11/GUI version. statistics sas 9.4 Website SAS is a software suite developed by SAS Institute for advanced analytics, multivariate analyses, business intelligence, data management, and predictive analytics. statistics stata 15 14 16 17 18 Website Stata is a complete, integrated statistical software package that provides everything you need for data analysis, data management, and graphics. symbolic libmatheval 1.1.11 Website GNU libmatheval is a library (callable from C and Fortran) to parse and evaluate symbolic expressions input as text. symbolic maxima 5.47.0 Website Maxima is a system for the manipulation of symbolic and numerical expressions. symbolic py-pysr 0.12.3_py39 Website High-Performance Symbolic Regression in Python and Julia. symbolic py-sympy 1.1.1_py27 1.1.1_py36 1.11.1_py39 Website SymPy is a Python library for symbolic mathematics. technical computing mathematica 13.1.0 Website A symbolic language and platform for modern technical computing. topic modelling py-gensim 4.2.0_py39 Website Gensim is a Python library for topic modelling, document indexing and similarity retrieval with large corpora."},{"location":"docs/software/list/#physics","title":"physics","text":"Field Module\u00a0name Version(s) URL Description astronomy cfitsio 4.0.0 Website FITSIO is a library of C and Fortran subroutines for reading and writing data files in FITS (Flexible Image Transport System) data format. astronomy heasoft 6.22.1 6.26.1 Website HEAsoft is a Unified Release of the FTOOLS (General and mission-specific tools to manipulate FITS files) and XANADU (High-level, multi-mission tasks for X-ray astronomical spectral, timing, and imaging data analysis) software packages. astronomy py-astropy 4.0.1_py36 Website The Astropy Project is a community effort to develop a common core package for Astronomy in Python and foster an ecosystem of interoperable astronomy packages. astronomy py-lenstools 1.0_py36 Website This python package collects together a suite of widely used analysis tools in Weak Gravitational Lensing. astronomy py-namaster 1.2.2_py36 Website NaMaster is a C library, Python module and standalone program to compute full-sky angular cross-power spectra of masked fields with arbitrary spin and an arbitrary number of known contaminants using a pseudo-Cl (aka MASTER) approach. CFD su2 7.0.3 Website SU2: An Open-Source Suite for Multiphysics Simulation and Design. climate modeling fre-nctools 2022.01 Website FRE-NCtools is a collection of tools to help with the creation and manipulation of netCDF files used for climate modeling. climate modeling cdo 1.9.7.1 2.1.1 Website CDO is a collection of command line Operators to manipulate and analyse Climate and NWP model Data.
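A small sketch of the py-sympy modules listed above (the expression is arbitrary, chosen to show differentiation and integration):

```python
# py-sympy sketch: symbolic differentiation and integration.
import sympy as sp

x = sp.symbols("x")
expr = sp.sin(x) * sp.exp(x)
print(sp.diff(expr, x))        # exp(x)*sin(x) + exp(x)*cos(x)
print(sp.integrate(expr, x))   # exp(x)*sin(x)/2 - exp(x)*cos(x)/2
```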
geophysics opensees 2.5.0 Website OpenSees is a software framework for developing applications to simulate the performance of structural and geotechnical systems subjected to earthquakes. geoscience gdal 3.4.1 2.2.1 3.5.2 Website GDAL is a translator library for raster and vector geospatial data formats. geoscience geos 3.6.2 3.11.0 3.12.1 Website GEOS (Geometry Engine - Open Source) is a C++ port of Java Topology Suite (JTS). geoscience geosx 0.2.0-20220523 Website GEOSX is a simulation framework for modeling coupled flow, transport, and geomechanics in the subsurface. geoscience gmtsar 6.2.2 Website An InSAR processing system based on GMT (Generic Mapping Tools). geoscience proj 8.2.1 4.9.3 9.1.0 Website PROJ is a generic coordinate transformation software that transforms geospatial coordinates from one coordinate reference system (CRS) to another. geoscience py-gdal-utils 3.4.1_py39 Website gdal-utils is the GDAL Python Utilities distribution. geoscience py-opendrift 1.0.3_py27 Website OpenDrift is a software for modeling the trajectories and fate of objects or substances drifting in the ocean, or even in the atmosphere. geoscience py-pyproj 1.9.5.1_py27 1.9.5.1_py36 3.4.0_py39 Website Python interface to PROJ4 library for cartographic transformations. geoscience swash 9.01a Website SWASH (an acronym of Simulating WAves till SHore) is a non-hydrostatic wave-flow model. geoscience udunits 2.2.26 Website The UDUNITS package from Unidata is a C-based package for the programmatic handling of units of physical quantities. lib libgdsii 0.21 Website libGDSII is a C++ library and command-line utility for reading GDSII geometry files. magnetism mumax 3.10 Website mumax3 is a GPU-accelerated micromagnetic simulation program. materials science atat 3.36 Website Alloy Theoretic Automated Toolkit: a software toolkit for modeling coupled configurational and vibrational disorder in alloy systems. materials science py-megnet 1.3.0_py39 Website The MatErials Graph Network (MEGNet) is an implementation of DeepMind's graph networks for universal machine learning in materials science. materials science py-pymatgen 2022.5.26_py39 Website Pymatgen (Python Materials Genomics) is a robust, open-source Python library for materials analysis. micromagnetics oommf 1.2b4 Website OOMMF is a set of portable, extensible public domain micromagnetic programs and associated tools. particle openmc 0.10.0 Website OpenMC is a Monte Carlo particle transport simulation code focused on neutron criticality calculations. photonics meep 1.3 1.4.3 1.24.0 Website Meep is a free finite-difference time-domain (FDTD) simulation software package to model electromagnetic systems. photonics mpb 1.5 1.6.2 1.11.1 Website MPB is a free software package for computing the band structures, or dispersion relations, and electromagnetic modes of periodic dielectric structures, on both serial and parallel computers. quantum information science cuquantum 22.03.0.40 Website NVIDIA cuQuantum is an SDK of optimized libraries and tools for accelerating quantum computing workflows. quantum information science py-cuquantum-python 22.3.0_py39 Website NVIDIA cuQuantum Python provides Python bindings and high-level object-oriented models for accessing the full functionalities of NVIDIA cuQuantum SDK from Python. quantum mechanics py-quspin 0.3.5_py36 Website QuSpin is an open-source Python package for exact diagonalization and quantum dynamics of arbitrary boson, fermion and spin many-body systems.
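A hedged sketch of the py-pyproj modules listed above (the Transformer API shown here applies to the 3.4.0_py39 build; the older 1.9.5.1 builds use the legacy pyproj.Proj interface instead, and the coordinates are arbitrary):

```python
# py-pyproj sketch: transform a lat/lon point into projected coordinates.
from pyproj import Transformer

t = Transformer.from_crs("EPSG:4326", "EPSG:32610")  # WGS84 -> UTM zone 10N
print(t.transform(37.4, -122.2))                     # (easting, northing)
```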
quantum mechanics py-qutip 4.5.2_py36 Website QuTiP is open-source software for simulating the dynamics of closed and open quantum systems."},{"location":"docs/software/list/#system","title":"system","text":"Field Module\u00a0name Version(s) URL Description backup restic 0.9.5 0.12.1 0.16.3 Website Fast, secure, efficient backup program. benchmark hp2p 3.2 Website Heavy Peer To Peer: an MPI-based benchmark for network diagnostic. benchmark mpibench 20190729 Website Times MPI collectives over a series of message sizes. benchmark mprime 29.4 Website mprime is used by GIMPS, a distributed computing project dedicated to finding new Mersenne prime numbers, and which is commonly used as a stability testing utility. benchmark osu-micro-benchmarks 5.6.1 5.6.3 5.7 5.9 Website The OSU MicroBenchmarks carry out a variety of message passing performance tests using MPI. benchmark py-linktest 2.1.19_py39 Website LinkTest is a communication API benchmarking tool that tests point-to-point connections. checkpointing dmtcp 2.6.0 Website DMTCP (Distributed MultiThreaded Checkpointing) transparently checkpoints a single-host or distributed computation in user-space -- with no modifications to user code or to the O/S. cloud interface aws-cli 2.0.50 Website This package provides a unified command line interface to Amazon Web Services. cloud interface google-cloud-sdk 400.0.0 448.0.0 Website Command-line interface for Google Cloud Platform products and services. cloud interface s5cmd 2.0.0 Website Parallel S3 and local filesystem execution tool. cloud interface steampipe 0.14.6 Website Steampipe is an open source tool for querying cloud APIs in a universal way and reasoning about the data in SQL. compiler mrc 1.3.3 Website MRC is a resource compiler that can create self-contained applications, by including all the required data inside executable files. compression libarchive 3.3.2 3.4.2 3.5.2 Website The libarchive project develops a portable, efficient C library that can read and write streaming archives in a variety of formats. compression libzip 1.5.1 Website libzip is a C library for reading, creating, and modifying zip archives. compression lz4 1.8.0 Website LZ4 is a lossless compression algorithm. compression lzo 2.10 Website LZO is a portable lossless data compression library written in ANSI C. compression mpibzip2 0.6 Website MPIBZIP2 is a parallel implementation of the bzip2 block-sorting file compressor that uses MPI and achieves significant speedup on cluster machines. compression p7zip 16.02 Website p7zip is a Linux port of 7zip, a file archiver with high compression ratio. compression pbzip2 1.1.12 Website PBZIP2 is a parallel implementation of the bzip2 block-sorting file compressor that uses pthreads and achieves near-linear speedup on SMP machines. compression pigz 2.4 Website A parallel implementation of gzip for modern multi-processor, multi-core machines. compression szip 2.1.1 Website Szip compression software, providing lossless compression of scientific data, is an implementation of the extended-Rice lossless compression algorithm. compression xz 5.2.3 Website XZ Utils, the successor to LZMA Utils, is free general-purpose data compression software with a high compression ratio. compression zlib 1.2.11 Website zlib is designed to be a free, general-purpose, legally unencumbered -- that is, not covered by any patents -- lossless data-compression library for use on virtually any computer hardware and operating system.
compression zstd 1.5.2 Website Zstandard, or zstd, is a fast lossless compression algorithm, targeting real-time compression scenarios at zlib-level and better compression ratios. containers libnvidia-container 1.0.0rc2 Website libnvidia-container is a library and a simple CLI utility to automatically configure GNU/Linux containers leveraging NVIDIA hardware. containers proot 5.2.0 5.1.0 Website PRoot is a user-space implementation of chroot, mount --bind, and binfmt_misc. containers py-spython 0.3.13_py39 0.3.13_py312 Website Singularity Python (spython) is the Python API for working with Singularity containers. database bdb 6.2.32 Website Berkeley DB (BDB) is a software library intended to provide a high-performance embedded database for key/value data. database mariadb 10.2.11 10.6.9 Website MariaDB is a community-developed fork of the MySQL relational database management system intended to remain free under the GNU GPL. database postgresql 10.5 14.5 Website PostgreSQL is a powerful, open source object-relational database system with a strong focus on reliability, feature robustness, and performance. database sqlite 3.18.0 3.37.2 3.44.2 Website SQLite is a self-contained, high-reliability, embedded, full-featured, public-domain, SQL database engine. database sqliteodbc 0.9998 Website ODBC driver for SQLite. database unixodbc 2.3.9 Website unixODBC is an open-source project that implements the ODBC API. document management pandoc 2.7.3 Website Pandoc is a universal document converter. document processing ghostscript 9.53.2 Website Ghostscript is an interpreter for the PostScript language and PDF files. document processing groff 1.23.0 Website groff (GNU roff) is a typesetting system that reads plain text input files that include formatting commands to produce output in PostScript, PDF, HTML, or DVI formats or for display to a terminal. document processing lyx 2.3.2 Website LyX is a document processor. document processing poppler 0.47.0 Website Poppler is a PDF rendering library. document processing texinfo 6.6 Website Texinfo is the official documentation format of the GNU project. document processing texlive 2019 Website TeX Live is an easy way to get up and running with the TeX document production system. file management dua-cli 2.20.1 Website dua (-> Disk Usage Analyzer) is a tool to conveniently learn about the usage of disk space of a given directory. file management duc 1.4.4 Website Duc is a collection of tools for indexing, inspecting and visualizing disk usage. file management exa 0.8.0 Website exa is a replacement for ls written in Rust. file management fdupes 2.2.1 Website FDUPES is a program for identifying or deleting duplicate files residing within specified directories. file management fpart 0.9.3 Website fpart sorts files and packs them into partitions. file management midnight-commander 4.8.29 Website GNU Midnight Commander is a visual file manager. file management ncdu 1.18.1 1.15.1 2.2.1 Website Ncdu is a disk usage analyzer with an ncurses interface. file management py-pcircle 0.17_py27 Website pcircle contains a suite of file system tools developed at OLCF to take advantage of highly scalable parallel file system such as Lustre. file management rmlint 2.8.0 Website rmlint finds space waste and other broken things on your filesystem and offers to remove it. file management tdu 1.36 Website tdu estimates the disk space occupied by all files in a given path. file transfer aria2 1.35.0 Website aria2 is a lightweight multi-protocol & multi-source command-line download utility.
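As a quick illustration of working with the SQLite databases listed above, a minimal sketch using Python's built-in sqlite3 module (the table and values are made up; the sqlite module's CLI operates on the same database files):

```python
# sqlite3 sketch: create an in-memory database, insert a row, query it.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE modules (name TEXT, version TEXT)")
conn.execute("INSERT INTO modules VALUES (?, ?)", ("sqlite", "3.44.2"))
print(conn.execute("SELECT * FROM modules").fetchall())
conn.close()
```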
file transfer aspera-cli 3.9.6 Website The IBM Aspera Command-Line Interface (the Aspera CLI) is a collection of Aspera tools for performing high-speed, secure data transfers from the command line. file transfer lftp 4.8.1 Website LFTP is a sophisticated file transfer program supporting a number of network protocols (ftp, http, sftp, fish, torrent). file transfer mpifileutils 0.10.1 0.11 0.11.1 Website mpiFileUtils is a suite of MPI-based tools to manage large datasets, which may vary from large directory trees to large files. file transfer py-globus-cli 1.2.0 1.9.0_py27 1.9.0_py36 3.2.0_py39 3.8.0_py39 3.19.0_py39 Website A command line wrapper over the Globus SDK for Python. file transfer py-httpie 3.2.1_py39 Website HTTPie is a command-line HTTP client designed for testing, debugging, and generally interacting with APIs and HTTP servers. file transfer rclone 1.55.1 1.59.1 1.65.0 Website Rclone is a command line program to sync files and directories to and from: Google Drive, Amazon S3, Dropbox, Google Cloud Storage, Amazon Drive, Microsoft One Drive, Hubic, Backblaze B2, Yandex Disk, or the local filesystem. framework mono 5.12.0.301 5.20.1.19 Website Mono is an open source implementation of Microsoft's .NET Framework based on the ECMA standards for C# and the Common Language Runtime. hardware hwloc 2.7.0 2.9.3 Website The Portable Hardware Locality (hwloc) software package provides a portable abstraction of the hierarchical topology of modern architectures. hardware libpciaccess 0.16 Website Generic PCI access library. job management slurm-drmaa 1.1.2 Website DRMAA for Slurm Workload Manager (Slurm) is an implementation of Open Grid Forum Distributed Resource Management Application API (DRMAA) version 1 for submission and control of jobs to Slurm. language tcltk 8.6.6 Website Tcl (Tool Command Language) is a dynamic programming language, suitable for web and desktop applications, networking, administration, testing. Tk is a graphical user interface toolkit. libs apr 1.6.3 Website The Apache Portable Runtime is a supporting library for the Apache web server. It provides a set of APIs that map to the underlying operating system. libs apr-util 1.6.1 Website The Apache Portable Runtime is a supporting library for the Apache web server. It provides a set of APIs that map to the underlying operating system. libs atk 2.24.0 Website ATK is the Accessibility Toolkit. It provides a set of generic interfaces allowing accessibility technologies such as screen readers to interact with a graphical user interface. libs benchmark 1.2.0 Website A microbenchmark support library. libs cairo 1.14.10 Website Cairo is a 2D graphics library with support for multiple output devices. libs cups 2.2.4 Website CUPS is the standards-based, open source printing system. libs dbus 1.10.22 Website D-Bus is a message bus system, a simple way for applications to talk to one another. libs enchant 1.6.1 2.2.3 Website Enchant is a library (and command-line program) that wraps a number of different spelling libraries and programs with a consistent interface. libs fltk 1.3.4 Website FLTK (pronounced 'fulltick') is a cross-platform C++ GUI toolkit. libs fontconfig 2.12.4 Website Fontconfig is a library for configuring and customizing font access. libs freeglut 3.0.0 Website FreeGLUT is a free-software/open-source alternative to the OpenGL Utility Toolkit (GLUT) library.
libs freetype 2.8.1 2.9.1 Website FreeType is a software font engine that is designed to be small, efficient, highly customizable, and portable while capable of producing high-quality output (glyph images). libs fribidi 1.0.12 Website The Free Implementation of the Unicode Bidirectional Algorithm. libs ftgl 2.1.2 Website FTGL is a free cross-platform Open Source C++ library that uses Freetype2 to simplify rendering fonts in OpenGL applications. libs gc 7.6.0 Website The Boehm-Demers-Weiser conservative garbage collector can be used as a garbage collecting replacement for C malloc or C++ new. libs gconf 2.9.91 Website GConf is a system for storing application preferences. libs gdk-pixbuf 2.36.8 Website The GdkPixbuf library provides facilities for loading images in a variety of file formats. libs gflags 2.2.1 2.2.2 Website The gflags package contains a C++ library that implements commandline flags processing. libs giflib 5.1.4 Website GIFLIB is a package of portable tools and library routines for working with GIF images. libs glib 2.52.3 Website The GLib library provides core non-graphical functionality such as high level data types, Unicode manipulation, and an object and type system to C programs. libs glog 0.3.5 Website C++ implementation of the Google logging module. libs gnutls 3.5.9 Website GnuTLS is a secure communications library implementing the SSL, TLS and DTLS protocols and technologies around them. libs gobject-introspection 1.52.1 Website GObject introspection is a middleware layer between C libraries (using GObject) and language bindings. libs googletest 1.8.0 Website Google Test is Google's C++ test framework. libs gstreamer 1.12.0 Website GStreamer is a library for constructing graphs of media-handling components. libs gtk+ 2.24.30 3.22.18 Website GTK+, or the GIMP Toolkit, is a multi-platform toolkit for creating graphical user interfaces. libs harfbuzz 1.4.8 Website HarfBuzz is an OpenType text shaping engine. libs hunspell 1.6.2 Website Hunspell is a spell checker. libs hyphen 2.8.8 Website Hyphen is a hyphenation library to use converted TeX hyphenation patterns. libs icu 59.1 Website ICU is a set of C/C++ and Java libraries providing Unicode and Globalization support for software applications. libs jansson 2.13.1 Website C library for encoding, decoding and manipulating JSON data. libs jemalloc 5.3.0 Website jemalloc is a general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support. libs json-glib 1.4.4 Website JSON-GLib is a library providing serialization and deserialization support for the JavaScript Object Notation (JSON) format described by RFC 4627. libs leptonica 1.82.0 Website Leptonica is an open source library containing software that is broadly useful for image processing and image analysis applications. libs libaio 0.3.111 Website libaio provides the Linux-native API for async I/O. libs libart_lgpl 2.3.21 Website Libart is a library for high-performance 2D graphics. libs libcroco 0.6.13 Website Libcroco is a standalone css2 parsing and manipulation library. libs libepoxy 1.4.1 Website Epoxy is a library for handling OpenGL function pointer management for you. libs libexif 0.6.21 Website A library for parsing, editing, and saving EXIF data. libs libffi 3.2.1 Website libffi is a portable Foreign Function Interface library. libs libgcrypt 1.8.2 Website Libgcrypt is a general purpose cryptographic library originally based on code from GnuPG.
libs libgd 2.2.5 Website GD is an open source code library for the dynamic creation of images by programmers. libs libgdiplus 5.6 Website C-based implementation of the GDI+ API. libs libglvnd 1.2.0 Website libglvnd is a vendor-neutral dispatch layer for arbitrating OpenGL API calls between multiple vendors. libs libgnomecanvas 2.30.3 Website Library for the GNOME canvas, an engine for structured graphics that offers a rich imaging model, high performance rendering, and a powerful, high-level API. libs libgpg-error 1.27 Website Libgpg-error is a small library that originally defined common error values for all GnuPG components. libs libiconv 1.16 Website libiconv is a conversion library for string encoding. libs libidl 0.8.14 Website The libIDL package contains libraries for Interface Definition Language files. This is a specification for defining portable interfaces. libs libjpeg-turbo 1.5.1 2.1.4 Website libjpeg-turbo is a JPEG image codec that uses SIMD instructions (MMX, SSE2, AVX2, NEON, AltiVec) to accelerate baseline JPEG compression and decompression on x86, x86-64, ARM, and PowerPC systems. libs libmng 2.0.3 Website THE reference library for reading, displaying, writing and examining Multiple-Image Network Graphics. MNG is the animation extension to the popular PNG image-format. libs libpng 1.2.57 1.6.29 Website libpng is the official PNG reference library. It supports almost all PNG features, is extensible, and has been extensively tested for over 20 years. libs libproxy 0.4.15 Website libproxy is a library that provides automatic proxy configuration management. libs libressl 2.5.3 3.2.1 Website LibreSSL is a version of the TLS/crypto stack forked from OpenSSL in 2014, with goals of modernizing the codebase, improving security, and applying best practice development processes. libs librsvg 2.36.4 Website Librsvg is a library to render SVG files using cairo as a rendering engine. libs libseccomp 2.3.3 Website The libseccomp library provides an easy to use, platform independent, interface to the Linux Kernel's syscall filtering mechanism. libs libsodium 1.0.18 Website Sodium is a modern, easy-to-use software library for encryption, decryption, signatures, password hashing and more. libs libsoup 2.61.2 Website libsoup is an HTTP client/server library for GNOME. libs libtasn1 4.13 Website Libtasn1 is the ASN.1 library used by GnuTLS, p11-kit and some other packages. libs libtiff 4.0.8 4.4.0 4.5.0 Website libtiff provides support for the Tag Image File Format (TIFF), a widely used format for storing image data. libs libunistring 0.9.7 Website Libunistring provides functions for manipulating Unicode strings and for manipulating C strings according to the Unicode standard. libs libuuid 1.0.3 Website Portable uuid C library. libs libuv 1.38.1 Website libuv is a multi-platform support library with a focus on asynchronous I/O. libs libwebp 0.6.1 Website WebP is a modern image format that provides superior lossless and lossy compression for images on the web. libs libxkbcommon 0.9.1 Website libxkbcommon is a keyboard keymap compiler and support library which processes a reduced subset of keymaps as defined by the XKB (X Keyboard Extension) specification. libs libxml2 2.9.4 Website Libxml2 is an XML C parser and toolkit. libs libxslt 1.1.32 Website Libxslt is the XSLT C library developed for the GNOME project. XSLT itself is an XML language to define transformations for XML. libs mesa 17.1.6 Website Mesa is an open-source implementation of the OpenGL, Vulkan and other specifications.
libs minipmi 1.0 Website Implementation of a minimal subset of the PMI1 and PMI2 specifications. libs ncurses 6.0 6.4 Website The ncurses (new curses) library is a free software emulation of curses in System V Release 4.0 (SVr4), and more. libs nettle 3.3 Website Nettle is a cryptographic library that is designed to fit easily in more or less any context. libs openjpeg 2.3.1 Website OpenJPEG is an open-source JPEG 2000 codec written in C language. libs openssl 3.0.7 Website OpenSSL is a full-featured toolkit for general-purpose cryptography and secure communication. libs orbit 2.14.19 Website ORBit2 is a CORBA 2.4-compliant Object Request Broker (ORB) featuring mature C, C++ and Python bindings. libs pango 1.40.10 Website Pango is a library for laying out and rendering of text, with an emphasis on internationalization. libs pcre 8.40 Website The PCRE library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5. libs pcre2 10.35 10.40 Website The PCRE2 library is a set of functions that implement regular expression pattern matching using the same syntax and semantics as Perl 5. libs popt 1.16 Website Library for parsing command line options. libs py-lmdb 0.93 Website Universal Python binding for the LMDB 'Lightning' Database. libs py-mako 1.0.7_py27 1.0.7_py36 Website Mako is a template library written in Python. It provides a familiar, non-XML syntax which compiles into Python modules for maximum performance. libs py-pygobject 3.32.2_py36 Website PyGObject is a Python package which provides bindings for GObject based libraries such as GTK, GStreamer, WebKitGTK, GLib, GIO and many more. libs py-pyopengl 3.1.5_py39 Website Standard OpenGL bindings for Python. libs py-pyqt5 5.9.1_py36 Website PyQt5 is a comprehensive set of Python bindings for Qt v5. libs readline 7.0 8.2 Website The GNU Readline library provides a set of functions for use by applications that allow users to edit command lines as they are typed in. libs serf 1.3.9 Website The serf library is a high performance C-based HTTP client library built upon the Apache Portable Runtime (APR) library. libs sionlib 1.7.7 Website Scalable I/O library for parallel access to task-local files. libs snappy 1.1.7 Website A fast compressor/decompressor. libs talloc 2.1.14 Website talloc is a hierarchical, reference counted memory pool system with destructors. libs tesseract 5.1.0 Website Tesseract is an open source text recognition (OCR) Engine. libs utf8proc 2.4.0 Website utf8proc is a small, clean C library that provides Unicode normalization, case-folding, and other operations for data in the UTF-8 encoding. libs webkitgtk 2.28.4 Website WebKitGTK is a full-featured port of the WebKit rendering engine, suitable for projects requiring any kind of web integration, from hybrid HTML/CSS applications to full-fledged web browsers. libs wxwidgets 3.0.4 Website wxWidgets is a C++ library that lets developers create applications for Windows, macOS, Linux and other platforms with a single code base. libs yaml-cpp 0.7.0 Website yaml-cpp is a YAML parser and emitter in C++ matching the YAML 1.2 spec. media ffmpeg 4.0 4.2.1 5.0 Website FFmpeg is the leading multimedia framework, able to decode, encode, transcode, mux, demux, stream, filter and play pretty much anything that humans and machines have created.
media libsndfile 1.0.28 Website Libsndfile is a C library for reading and writing files containing sampled sound (such as MS Windows WAV and the Apple/SGI AIFF format) through one standard library interface. performance likwid 4.3.2 5.2.1 Website Likwid is a simple toolsuite of command line applications for performance oriented programmers. resource monitoring nvtop 1.1.0 2.0.3 3.0.2 Website Nvtop stands for NVidia TOP, a (h)top like task monitor for NVIDIA GPUs. resource monitoring py-nvitop 1.3.2_py39 1.3.2_py312 Website An interactive NVIDIA-GPU process viewer and beyond. resource monitoring remora 1.8.5 Website Remora is a tool to monitor runtime resource utilization. resource monitoring ruse 2.0 Website A command line tool to measure process resource usage. scm gh 1.9.1 Website gh is GitHub on the command line. It brings pull requests, issues, and other GitHub concepts to the terminal next to where you are already working with git and your code. scm git 2.39.1 Website Git is a free and open source distributed version control system designed to handle everything from small to very large projects with speed and efficiency. scm git-annex 8.20210622 Website git-annex allows managing files with git, without checking the file contents into git. scm git-credential-manager 2.0.696 Website Secure, cross-platform Git credential storage with authentication to GitHub, Azure Repos, and other popular Git hosting services. scm git-lfs 2.4.0 Website Git Large File Storage (LFS) replaces large files such as audio samples, videos, datasets, and graphics with text pointers inside Git, while storing the file contents on a remote server. scm libgit2 1.1.0 Website libgit2 is a portable, pure C implementation of the Git core methods provided as a re-entrant linkable library with a solid API. scm mercurial 4.5.3 Website Mercurial is a free, distributed source control management tool. scm py-dvc 0.91.1_py36 Website Data Version Control or DVC is an open-source tool for data science and machine learning projects. scm subversion 1.9.7 1.12.2 Website Subversion is an open source version control system. shell powershell 7.1.5 Website PowerShell Core is a cross-platform automation and configuration tool/framework. testing py-pytest 7.1.3_py39 Website pytest is a full-featured Python testing framework. tools unifdef 2.12 Website The unifdef utility selectively processes conditional C preprocessor #if and #ifdef directives. tools clinfo 2.2.18.04.06 Website clinfo is a simple command-line application that enumerates all possible (known) properties of the OpenCL platform and devices available on the system. tools curl 8.4.0 Website curl is an open source command line tool and library for transferring data with URL syntax. tools depot_tools 20200731 Website Tools for working with Chromium development. tools expat 2.2.3 Website Expat is a stream-oriented XML parser library written in C. tools graphicsmagick 1.3.26 Website GraphicsMagick is the swiss army knife of image processing. tools imagemagick 7.0.7-2 Website ImageMagick is a free and open-source software suite for displaying, converting, and editing raster image and vector image files. tools jq 1.6 Website jq is a lightweight and flexible command-line JSON processor. tools leveldb 1.20 Website LevelDB is a fast key-value storage library written at Google that provides an ordered mapping from string keys to string values. tools lmdb 0.9.21 Website Symas LMDB is an extraordinarily fast, memory-efficient database we developed for the Symas OpenLDAP Project. 
tools motif 2.3.7 Website Motif is the toolkit for the Common Desktop Environment. tools parallel 20180122 20200822 Website GNU parallel is a shell tool for executing jobs in parallel using one or more computers. tools password-store 1.7.4 Website Simple password manager using gpg and ordinary unix directories. tools py-clustershell 1.9.0_py39 Website ClusterShell is an event-driven open source Python library, designed to run local or distant commands in parallel on server farms or on large Linux clusters. tools py-matlab-proxy 0.9.1_py39 0.10.0_py39 Website matlab-proxy is a Python package which enables you to launch MATLAB and access it from a web browser. tools py-nvidia-ml-py 12.550.52_py39 12.550.52_py312 Website Python bindings to the NVIDIA Management Library. tools py-pyside 5.15.2.1_py39 Website PySide is the official Python module from the Qt for Python project, which provides access to the complete Qt framework. tools py-wxpython 4.0.7_py39 4.2.0_py39 Website wxPython is the cross-platform GUI toolkit for the Python language. tools qt 5.9.1 6.4.0 Website Qt is a cross-platform application framework that is used for developing application software that can be run on various software and hardware platforms. tools ripgrep 11.0.1 Website ripgrep recursively searches directories for a regex pattern. tools rocksdb 5.7.3 Website A library that provides an embeddable, persistent key-value store for fast storage. tools x11 7.7 Website The X.Org project provides an open source implementation of the X Window System. tools xkeyboard-config 2.21 Website The non-arch keyboard configuration database for X Window."},{"location":"docs/software/list/#viz","title":"viz","text":"Field Module\u00a0name Version(s) URL Description data ncview 2.1.7 Website Ncview is a visual browser for netCDF format files. gis gmt 6.4.0 Website GMT (The Generic Mapping Tools) is an open source collection of command-line tools for manipulating geographic and Cartesian data sets. gis panoply 4.10.8 Website Panoply plots geo-referenced and other arrays from netCDF, HDF, GRIB, and other datasets. gis py-cartopy 0.21.0_py39 Website Cartopy is a Python package designed for geospatial data processing in order to produce maps and other geospatial data analyses. graphs graphviz 2.40.1 2.44.1 Website Graphviz is open source graph visualization software. imaging py-pillow 5.1.0_py27 5.1.0_py36 7.0.0_py36 8.2.0_py39 9.3.0_py39 10.2.0_py312 Website Pillow is a friendly PIL (Python Imaging Library) fork. imaging py-pillow-simd 7.0.0.post3_py36 9.2.0_py39 10.2.0_py312 Website Pillow-SIMD is an optimized version of Pillow. molecular visualization ovito 3.7.11 Website OVITO is a scientific visualization and data analysis solution for atomistic and other particle-based models. molecular visualization pymol 1.8.6.2 2.5.3 Website PyMOL is a Python-enhanced molecular graphics tool. plotting gnuplot 5.2.0 Website Gnuplot is a portable command-line driven graphing utility for Linux, OS/2, MS Windows, OSX, VMS, and many other platforms. plotting grace 5.1.25 Website Grace is a WYSIWYG tool to make two-dimensional plots of numerical data. plotting mathgl 8.0.1 Website MathGL is a library to make high-quality scientific graphics. plotting py-basemap 1.1.0_py27 1.1.0_py36 Website The matplotlib basemap toolkit is a library for plotting 2D data on maps in Python. 
plotting py-matplotlib 2.2.2_py27 2.1.2_py27 2.1.2_py36 2.2.2_py36 3.1.1_py36 3.2.1_py36 3.4.2_py39 3.7.1_py39 3.8.3_py312 Website Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. plotting py-plotly 2.4.1_py27 5.19.0_py39 5.19.0_py312 Website Plotly's Python graphing library makes interactive, publication-quality graphs online. plotting py-seaborn 0.12.1_py39 Website Seaborn is a Python data visualization library based on matplotlib. It provides a high-level interface for drawing attractive and informative statistical graphics. plotting veusz 3.3.1 Website Veusz is a scientific plotting and graphing program with a graphical user interface, designed to produce publication-ready 2D and 3D plots. remote display virtualgl 2.5.2 Website VirtualGL is an open source toolkit that gives any Unix or Linux remote display software the ability to run OpenGL applications with full 3D hardware acceleration."},{"location":"docs/software/modules/","title":"Modules","text":""},{"location":"docs/software/modules/#environment-modules","title":"Environment modules","text":"

Software is provided on Sherlock under the form of loadable environment modules.

Software is only accessible via modules

The use of a module system means that most software is not accessible by default and has to be loaded using the module command. This mechanism allows us to provide multiple versions of the same software concurrently, and lets users easily switch between software versions.

Sherlock uses Lmod to manage software installations. The module system sets up the user's shell environment to give access to applications, and makes running and compiling software easier. It also allows us to provide multiple versions of the same software that would otherwise conflict with each other, and abstracts away the OS's sometimes rigid versions and dependencies.

When you first log into Sherlock, you'll be presented with a default, bare-bones environment with minimal software available. The module system is used to manage the user environment and to activate software packages on demand. In order to use software installed on Sherlock, you must first load the corresponding software module.

When you load a module, the system will set or modify your user environment variables to enable access to the software package provided by that module. For instance, the $PATH environment variable might be updated so that appropriate executables for that package can be used.
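
For instance, a quick way to see this in action (the module name here is just an example, and exact paths will vary):

$ ml gcc\n$ which gcc          # now resolves to the module-provided compiler\n$ module show gcc    # displays the environment changes made by the module\n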

"},{"location":"docs/software/modules/#module-categories","title":"Module categories","text":"

Modules on Sherlock are organized by scientific field, in distinct categories. This is to limit the information overload that can result when displaying the full list of available modules. Given the large diversity of the Sherlock user population, not all users are interested in the same kind of software, and high-energy physicists may not want to see their screens cluttered with the latest bioinformatics packages.

Module categories

You will first have to load a category module before getting access to individual modules. The math and devel categories are loaded by default, and modules in those categories can be loaded directly.

For instance, to be able to load the gromacs module, you'll first need to load the chemistry module. This can be done in a single command, by specifying first the category, then the actual application module name:

$ module load chemistry gromacs\n

The math and devel categories, which are loaded by default, provide direct access to compilers, languages, and MPI and numerical libraries.

For a complete list of software module categories, please refer to the list of available software.

Searching for a module

To know how to access a module, you can use the module spider <module_name> command. It will search through all the installed modules, even if they're masked, and display instructions to load them. See the Examples section for details.

"},{"location":"docs/software/modules/#module-usage","title":"Module usage","text":"

The most common module commands are outlined in the following table. module commands may be shortened with the ml alias, with slightly different semantics.

Module names auto-completion

The module command supports auto-completion, so you can just start typing the name of a module, and press Tab to let the shell automatically complete the module name and/or version.

Module\u00a0command Short\u00a0version Description module avail ml av List\u00a0available\u00a0software1 module spider gromacs ml spider gromacs Search for particular software module keyword blas ml key blas Search for blas in module names and descriptions module whatis gcc ml whatis gcc Display information about the gcc module module help gcc ml help gcc Display module specific help module load gcc ml gcc Load a module to use the associated software module load gsl/2.3 ml gsl/2.3 Load specific version of a module module unload gcc ml -gcc Unload a module module swap gcc icc ml -gcc icc Swap a module (unload gcc and replace it with icc) module purge ml purge Remove all modules2 module save foo ml save foo Save the state of all loaded modules in a collection named foo module restore foo ml restore foo Restore the state of saved modules from the foo collection
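
For instance, module collections (the last two rows above) can be used to save and restore a working set of modules. This is a minimal sketch, and the module and collection names are just examples:

$ ml gcc openmpi     # load a set of modules you use together\n$ ml save mympi      # save it as a collection named mympi\n$ ml purge           # later, after a purge or a new login...\n$ ml restore mympi   # ...reload the whole set at once\n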

Additional module sub-commands are documented in the module help command. For complete reference, please refer to the official Lmod documentation.

"},{"location":"docs/software/modules/#module-properties","title":"Module properties","text":"

Multiple versions

When multiple versions of the same module exist, module will load the one marked as Default (D). For the sake of reproducibility, we recommend always specifying the module version you want to load, as defaults may evolve over time.
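
For instance, with the two gsl versions shown in the listing example further below:

$ ml gsl        # loads the default version, gsl/2.3 (D)\n$ ml gsl/1.16   # loads a specific version, more reproducible over time\n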

To quickly see some of the modules characteristics, module avail will display colored property attributes next to the module names. The main module properties are:

  • S: Module is sticky, requires --force to unload or purge
  • L: Indicate currently loaded module
  • D: Default module that will be loaded when multiple versions are available
  • r: Restricted access, typically software under license. Contact us for details
  • g: GPU-accelerated software, will only run on GPU nodes
  • m: Software supports parallel execution using MPI
"},{"location":"docs/software/modules/#searching-for-modules","title":"Searching for modules","text":"

You can search through all the available modules for either:

  • a module name (if you already know it), using module spider
  • any string within modules names and descriptions, using module keyword

For instance, if you want to know how to load the gromacs module, you can do:

$ module spider gromacs\n

If you don't know the module name, or want to list all the modules that contain a specific string of characters in their name or description, you can use module keyword. For instance, the following command will list all the modules providing a BLAS library:

$ module keyword blas\n
"},{"location":"docs/software/modules/#examples","title":"Examples","text":""},{"location":"docs/software/modules/#listing","title":"Listing","text":"

To list all the modules that can be loaded, you can do:

$ ml av\n\n-- math -- numerical libraries, statistics, deep-learning, computer science ---\n   R/3.4.0             gsl/1.16             openblas/0.2.19\n   cudnn/5.1  (g)      gsl/2.3       (D)    py-scipystack/1.0_py27 (D)\n   cudnn/6.0  (g,D)    imkl/2017.u2         py-scipystack/1.0_py36\n   fftw/3.3.6          matlab/R2017a (r)\n\n------------------ devel -- compilers, MPI, languages, libs -------------------\n   boost/1.64.0          icc/2017.u2           python/2.7.13    (D)\n   cmake/3.8.1           ifort/2017.u2         python/3.6.1\n   cuda/8.0.61    (g)    impi/2017.u2   (m)    scons/2.5.1_py27 (D)\n   eigen/3.3.3           java/1.8.0_131        scons/2.5.1_py36\n   gcc/6.3.0      (D)    julia/0.5.1           sqlite/3.18.0\n   gcc/7.1.0             llvm/4.0.0            tbb/2017.u2\n   h5utils/1.12.1        nccl/1.3.4     (g)    tcltk/8.6.6\n   hdf5/1.10.0p1         openmpi/2.0.2  (m)\n\n-------------- categories -- load to make more modules available --------------\n   biology      devel (S,L)    physics    system\n   chemistry    math  (S,L)    staging    viz\n\n  Where:\n   S:  Module is Sticky, requires --force to unload or purge\n   r:  Restricted access\n   g:  GPU support\n   L:  Module is loaded\n   m:  MPI support\n   D:  Default Module\n\nUse \"module spider\" to find all possible modules.\nUse \"module keyword key1 key2 ...\" to search for all possible modules matching\nany of the \"keys\".\n
"},{"location":"docs/software/modules/#searching","title":"Searching","text":"

To search for a specific string in modules names and descriptions, you can run:

$ module keyword numpy\n---------------------------------------------------------------------------\n\nThe following modules match your search criteria: \"numpy\"\n---------------------------------------------------------------------------\n\n  py-scipystack: py-scipystack/1.0_py27, py-scipystack/1.0_py36\n    The SciPy Stack is a collection of open source software for scientific\n    computing in Python. It provides the following packages: numpy, scipy,\n    matplotlib, ipython, jupyter, pandas, sympy and nose.\n\n---------------------------------------------------------------------------\n[...]\n$ ml key compiler\n---------------------------------------------------------------------------\n\nThe following modules match your search criteria: \"compiler\"\n---------------------------------------------------------------------------\n\n  cmake: cmake/3.8.1\n    CMake is an extensible, open-source system that manages the build\n    process in an operating system and in a compiler-independent manner.\n\n  gcc: gcc/6.3.0, gcc/7.1.0\n    The GNU Compiler Collection includes front ends for C, C++, Fortran,\n    Java, and Go, as well as libraries for these languages (libstdc++,\n    libgcj,...).\n\n  icc: icc/2017.u2\n    Intel C++ Compiler, also known as icc or icl, is a group of C and C++\n    compilers from Intel\n\n  ifort: ifort/2017.u2\n    Intel Fortran Compiler, also known as ifort, is a group of Fortran\n    compilers from Intel\n\n  llvm: llvm/4.0.0\n    The LLVM Project is a collection of modular and reusable compiler and\n    toolchain technologies. Clang is an LLVM native C/C++/Objective-C\n    compiler,\n\n---------------------------------------------------------------------------\n

To get information about a specific module, especially how to load it, the following command can be used:

$ module spider gromacs\n\n-------------------------------------------------------------------------------\n  gromacs: gromacs/2016.3\n-------------------------------------------------------------------------------\n    Description:\n      GROMACS is a versatile package to perform molecular dynamics, i.e.\n      simulate the Newtonian equations of motion for systems with hundreds to\n      millions of particles.\n\n    Properties:\n      GPU support      MPI support\n\n    You will need to load all module(s) on any one of the lines below before\n    the \"gromacs/2016.3\" module is available to load.\n\n      chemistry\n
"},{"location":"docs/software/modules/#loading","title":"Loading","text":"

Loading a category module gives access to field-specific software:

$ ml chemistry\n$ ml av\n\n------------- chemistry -- quantum chemistry, molecular dynamics --------------\n   gromacs/2016.3 (g,m)    vasp/5.4.1 (g,r,m)\n\n-- math -- numerical libraries, statistics, deep-learning, computer science ---\n   R/3.4.0             gsl/1.16             openblas/0.2.19\n   cudnn/5.1  (g)      gsl/2.3       (D)    py-scipystack/1.0_py27 (D)\n   cudnn/6.0  (g,D)    imkl/2017.u2         py-scipystack/1.0_py36\n   fftw/3.3.6          matlab/R2017a (r)\n\n------------------ devel -- compilers, MPI, languages, libs -------------------\n   boost/1.64.0          icc/2017.u2           python/2.7.13    (D)\n   cmake/3.8.1           ifort/2017.u2         python/3.6.1\n   cuda/8.0.61    (g)    impi/2017.u2   (m)    scons/2.5.1_py27 (D)\n   eigen/3.3.3           java/1.8.0_131        scons/2.5.1_py36\n   gcc/6.3.0      (D)    julia/0.5.1           sqlite/3.18.0\n   gcc/7.1.0             llvm/4.0.0            tbb/2017.u2\n   h5utils/1.12.1        nccl/1.3.4     (g)    tcltk/8.6.6\n   hdf5/1.10.0p1         openmpi/2.0.2  (m)\n\n-------------- categories -- load to make more modules available --------------\n   biology          devel (S,L)    physics    system\n   chemistry (L)    math  (S,L)    staging    viz\n\n[...]\n
"},{"location":"docs/software/modules/#resetting-the-modules-environment","title":"Resetting the modules environment","text":"

If you want to reset your modules environment as it was when you initially connected to Sherlock, you can use the ml reset command: it will remove all the modules you have loaded, and restore the original state where only the math and devel categories are accessible.

If you want to remove all modules from your environment, including the default math and devel modules, you can use ml --force purge.
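
For instance:

$ ml --force purge   # remove everything, including the sticky math and devel modules\n$ ml reset           # restore the default environment\n$ ml                 # with no argument, lists the currently loaded modules\n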

"},{"location":"docs/software/modules/#loading-modules-in-jobs","title":"Loading modules in jobs","text":"

In order for an application running in a Slurm job to have access to any necessary module-provided software packages, we recommend loading those modules in the job script directly. Since Slurm propagates all user environment variables by default, this is not strictly necessary, as jobs will inherit the modules loaded at submission time. But to make sure things are reproducible and avoid issues, it is preferable to explicitly load the modules in the batch scripts.

module load commands should be placed right after #SBATCH directives and before the actual executable calls. For instance:

#!/bin/bash\n#SBATCH ...\n#SBATCH ...\n#SBATCH ...\n\nml reset\nml load gromacs/2016.3\n\nsrun gmx_mpi ...\n
"},{"location":"docs/software/modules/#custom-modules","title":"Custom modules","text":"

Users are welcome and encouraged to build and install their own software on Sherlock. To that end, and to facilitate usage or sharing of their custom software installations, they can create their own module repositories.

See the Software Installation page for more details.
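
As a minimal sketch (all names and paths here are hypothetical), a module repository is simply a directory of Lmod modulefiles, made visible with module use:

$ cat $HOME/modules/mytool/1.0.lua\nwhatis(\"Name: mytool\")\nwhatis(\"Version: 1.0\")\nprepend_path(\"PATH\", \"/home/users/myuser/software/mytool/1.0/bin\")\n\n$ module use $HOME/modules\n$ ml mytool/1.0\n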

"},{"location":"docs/software/modules/#contributed-software","title":"Contributed software","text":"

PI groups, labs or departments can share their software installations and modules with the whole Sherlock community of users, and let everyone benefit from their tuning efforts and software developments.

Those modules are available in the specific contribs category, and organized by contributor name.

For instance, listing the available contributed modules can be done with:

$ ml contribs\n$ ml av\n-------------------- contribs -- contributed software ----------------------\n   poldrack\n

To get information about a specific lab module:

$ ml show poldrack\n----------------------------------------------------------------------------\n   /share/software/modules/contribs/poldrack.lua:\n----------------------------------------------------------------------------\nprepend_path(\"MODULEPATH\",\"/home/groups/russpold/modules\")\nwhatis(\"Name:        poldrack\")\nwhatis(\"Version:     1.0\")\nwhatis(\"Category:    contribs\")\nwhatis(\"URL:         https://github.com/poldracklab/lmod_modules\")\nwhatis(\"Description: Software modules contributed by the Poldrack Lab.\")\n

And to list the available software modules contributed by the lab:

$ ml poldrack\n$ ml av\n\n------------------------ /home/groups/russpold/modules -------------------------\n   afni/17.3.03           freesurfer/6.0.1            gsl/2.3      (D)\n   anaconda/5.0.0-py36    fsl/5.0.9                   pigz/2.4\n   ants/2.1.0.post710     fsl/5.0.11           (D)    remora/1.8.2\n   c3d/1.1.0              git-annex/6.20171109        xft/2.3.2\n[...]\n
  1. If a module is not listed here, it might be unavailable in the currently loaded module categories, and require loading another category module first. You can search for unlisted software with the module spider command.\u00a0\u21a9

  2. The math and devel category modules will not be unloaded with module purge as they are \"sticky\". If a user wants to unload a sticky module, they must specify the --force option.\u00a0\u21a9

"},{"location":"docs/software/containers/","title":"Index","text":""},{"location":"docs/software/containers/#introduction","title":"Introduction","text":"

Containers are a solution to the problem of how to get software to run reliably when moved from one computing environment to another. They also resolve installation problems by packaging all the dependencies of an application within a self-sustainable image, a.k.a. a container.

What's a container?

Put simply, a container consists of an entire runtime environment: an application, plus all its dependencies, libraries and other binaries, and configuration files needed to run it, bundled into one package. By containerizing the application platform and its dependencies, differences in OS distributions and underlying infrastructure are abstracted away.

"},{"location":"docs/software/containers/#container-solutions","title":"Container solutions","text":"

There are several ways to run containers in general, and on Sherlock specifically.

  • Apptainer

    Apptainer (formerly Singularity) is an open source container platform designed to run complex applications on high-performance computing (HPC) clusters in a simple, portable, and reproducible way.

    More information

  • More to come...

"},{"location":"docs/software/containers/apptainer/","title":"Singularity","text":"

Singularity is an open source container platform designed to run complex applications on high-performance computing (HPC) clusters in a simple, portable, and reproducible way. It's like Docker, but for HPC systems.

"},{"location":"docs/software/containers/apptainer/#why-not-docker","title":"Why not Docker?","text":"

Docker has long been the reference and the most popular container framework in DevOps and Enterprise IT environments, so why not use Docker on Sherlock? Well, for a variety of technical reasons, mostly related to security.

Docker was never designed nor developed to run in multi-tenant environments, and even less on HPC clusters. Specifically:

  • Docker requires a daemon running as root on all of the compute nodes, which has serious security implications,
  • all authenticated actions (such as login, push ...) are also executed as root, meaning that multiple users can't use those functions on the same node,
  • Docker uses cgroups to isolate containers, as does the Slurm scheduler, which uses cgroups to allocate resources to jobs and enforce limits. Those uses are unfortunately conflicting.
  • but most importantly, allowing users to run Docker containers will give them root privileges inside that container, which will in turn let them access any of the clusters' filesystems as root. This opens the door to user impersonation, inappropriate file tampering or stealing, and is obviously not something that can be allowed on a shared resource.

That last point is certainly the single most important reason why we won't use Docker on Sherlock.

"},{"location":"docs/software/containers/apptainer/#why-singularity","title":"Why Singularity?","text":"

Singularity is Docker for HPC systems

Singularity allows running Docker containers natively, and is a perfect replacement for Docker on HPC systems such as Sherlock. That means that existing Docker containers can be directly imported and natively run with Singularity.

Despite Docker's shortcomings on HPC systems, the appeal of containers for scientific computing is undeniable, which is why we provide Singularity on Sherlock. Singularity is an alternative container framework, especially designed to run scientific applications on HPC clusters.

Singularity provides the same functionalities as Docker, without any of the drawbacks listed above. Using a completely different implementation, it doesn't require any privilege to run containers, and allows direct interaction with existing Docker containers.

The main motivation to use Singularity over Docker is the fact that it's been developed with HPC systems in mind, to solve those specific problems:

  • security: a user in the container is the same user as the one running the container, so no privilege escalation is possible,
  • ease of deployment: no daemon running as root on each node, a container is simply an executable,
  • no need to mount filesystems or do bind mappings to access devices,
  • ability to run MPI jobs based on containers,
  • and more...
"},{"location":"docs/software/containers/apptainer/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using Singularity on Sherlock. For more complete documentation about building and running containers with Singularity, please see the Singularity documentation.

"},{"location":"docs/software/containers/apptainer/#singularity-on-sherlock","title":"Singularity on Sherlock","text":"

As announced during the SC'18 Supercomputing Conference, Singularity is an integral part of the Sherlock cluster, and Singularity commands can be executed natively on any login or compute node, without the need to load any additional module1.

"},{"location":"docs/software/containers/apptainer/#importing-containers","title":"Importing containers","text":"

Pre-built containers can be obtained from a variety of sources. For instance:

  • DockerHub contains containers for various software packages, which can be directly used with Singularity,
  • SingularityHub is a registry for scientific linux containers,
  • the NVIDIA GPU Cloud registry for GPU-optimized containers,
  • many individual projects contain specific instructions for installation via Docker and/or Singularity, and may provide pre-built images in other locations.

To illustrate how Singularity can import and run Docker containers, here's an example of how to install and run the OpenFOAM CFD solver using Singularity. OpenFOAM can be quite difficult to install manually, but Singularity makes it very easy.

Interactive or batch usage

This example shows how to use Singularity interactively, but Singularity containers can be run in batch jobs as well.

The first step is to request an interactive shell. Singularity images can be pulled directly on compute nodes, and Singularity uses multiple CPU cores when assembling the image, so requesting multiple cores in your job can make the pull operation faster:

$ srun -c 4 --pty bash\n

We recommend storing Singularity images in $GROUP_HOME, as container images can take significant space in your $HOME directory.

$ mkdir -p $GROUP_HOME/$USER/simg\n$ cd $GROUP_HOME/$USER/simg\n

Then, the OpenFOAM container could be pulled directly from DockerHub by Singularity. This can take a moment to complete:

$ singularity pull docker://openfoam/openfoam6-paraview54\nDocker image path: index.docker.io/openfoam/openfoam6-paraview54:latest\nCache folder set to /scratch/users/kilian/.singularity/docker\nImporting: base Singularity environment\nExploding layer: sha256:1be7f2b886e89a58e59c4e685fcc5905a26ddef3201f290b96f1eff7d778e122.tar.gz\n[...]\nBuilding Singularity image...\nSingularity container built: ./openfoam6-paraview54.simg\nCleaning up...\nDone. Container is at: ./openfoam6-paraview54.simg\n
"},{"location":"docs/software/containers/apptainer/#running-containers","title":"Running containers","text":"

Once the image is downloaded, you are ready to run OpenFOAM from the container. The singularity shell command can be used to start the container, and run a shell within that image:

By default on Sherlock, all the filesystems that are available on the compute node will also be available in the container. If you want to start your shell in a specific directory, you can use the --pwd /path/ option. For instance, we'll create a /tmp/openfoam_test/ directory to store our tests results (that will be wiped out at the end of the job), and start the container shell there:

$ mkdir /tmp/openfoam_test\n$ singularity shell --pwd /tmp/openfoam_test openfoam6-paraview54.simg\nSingularity: Invoking an interactive shell within container...\nSingularity openfoam6-paraview54.simg:/tmp/openfoam_test>\n

You're now in the container, as denoted by the shell prompt (Singularity[...].simg:[path]>), which is different from the prompt displayed on the compute node (which usually looks like [login]@[compute_node] [path]$).

OpenFOAM provides a convenience script that can be sourced to make OpenFOAM commands directly accessible and set a few useful environment variables:

> source /opt/openfoam6/etc/bashrc\n

Now, we can run a simple example using OpenFOAM:

> cp -r $FOAM_TUTORIALS/incompressible/simpleFoam/pitzDaily .\n> cd pitzDaily\n> blockMesh\n[...]\nEnd\n\n> simpleFoam\n/*---------------------------------------------------------------------------*\\\n  =========                 |\n  \\\\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox\n   \\\\    /   O peration     | Website:  https://openfoam.org\n    \\\\  /    A nd           | Version:  6\n     \\\\/     M anipulation  |\n\\*---------------------------------------------------------------------------*/\nBuild  : 6-1a0c91b3baa8\nExec   : simpleFoam\nDate   : Oct 05 2018\nTime   : 23:37:30\nHost   : \"sh01-06n33.int\"\nPID    : 14670\nI/O    : uncollated\nCase   : /tmp/openfoam_test/pitzDaily\nnProcs : 1\nsigFpe : Enabling floating point exception trapping (FOAM_SIGFPE).\nfileModificationChecking : Monitoring run-time modified files using timeStampMaster (fileModificationSkew 10)\nallowSystemOperations : Allowing user-supplied system call operations\n\n// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\nCreate time\n[...]\nSIMPLE solution converged in 288 iterations\n\nstreamLine streamlines write:\n    seeded 10 particles\n    Tracks:10\n    Total samples:11980\n    Writing data to \"/tmp/openfoam_test/pitzDaily/postProcessing/sets/streamlines/288\"\nEnd\n\n>\n

When the simulation is done, you can exit the container with:

> exit\n

Because the container can see all the compute node's filesystems, the simulation output will be available in /tmp/openfoam_test after you exit the container:

$ ls /tmp/openfoam_test/pitzDaily/postProcessing/\nsets\n
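
As mentioned earlier, containers can also run non-interactively in batch jobs, typically via the singularity exec command. Here's a minimal sketch of a batch script using the image pulled above; the resource values are just placeholders:

#!/bin/bash\n#SBATCH --time=00:30:00\n#SBATCH --cpus-per-task=4\n#SBATCH --mem=8G\n\n# run a command inside the container, non-interactively\nsingularity exec $GROUP_HOME/$USER/simg/openfoam6-paraview54.simg bash -c \"source /opt/openfoam6/etc/bashrc && blockMesh -help\"\n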
"},{"location":"docs/software/containers/apptainer/#gpu-enabled-containers","title":"GPU-enabled containers","text":"

Sherlock also supports the use of container images provided by NVIDIA in the NVIDIA GPU Cloud (NGC). This registry provides GPU-accelerated containers for the most popular HPC and deep-learning scientific applications.

GPU support

Containers provided on NGC are only supported on Pascal and Volta architectures (TITAN Xp, Tesla P40, P100 or V100). For GPUs from the previous generations (GTX TITAN Black/X, Tesla K20/K80), things may or may not work.

We recommend making sure to select a supported GPU generation by adding the following directive to your batch script when submitting a job to run GPU-enabled containers from NGC:

#SBATCH -C \"GPU_GEN:PSC|GPU_GEN:VLT\"\n

"},{"location":"docs/software/containers/apptainer/#pulling-ngc-images","title":"Pulling NGC images","text":"

As before, we start by requesting an interactive shell with multiple CPU cores and moving to the directory where we'll save those images:

$ srun -c 4 --pty bash\n$ cd $GROUP_HOME/simg\n

A GPU is not required for pulling GPU-enabled containers

GPU-enabled containers can be pulled on any node, including nodes without a GPU. But their execution requires a GPU and thus, they need to be executed within a GPU job. See the GPU job section for more information.

To be able to pull an image from NGC, authentication credentials must be set. Users need to register and create an NGC API key; complete details can be found in the NGC Getting Started Guide.

You can then set the following environment variable to allow Singularity to authenticate with NGC:

$ export SINGULARITY_DOCKER_USERNAME='$oauthtoken'\n$ export SINGULARITY_DOCKER_PASSWORD=<NVIDIA NGC API key>\n

Note

The SINGULARITY_DOCKER_USERNAME environment variable must be set to the literal $oauthtoken string, for every user. It should not be replaced by anything else. Only the API key is specific to each user.

Once credentials are set in the environment, container images can be pulled from the NGC registry normally.

The general form of the Singularity command used to pull NGC containers is: $ singularity pull docker://nvcr.io/<registry>/<app:tag>

For example, to pull the NAMD NGC container tagged with version 2.12-171025, the corresponding command would be:

$ singularity pull docker://nvcr.io/hpc/namd:2.12-171025\n

After this command has finished, we'll have a Singularity image file in the current directory, named namd-2.12-171025.simg.

"},{"location":"docs/software/containers/apptainer/#running-ngc-containers","title":"Running NGC containers","text":"

Instructions about running NGC containers are provided on the NGC website, under each application.

Each application comes with specific running instructions, so we recommend following the container's particular guidelines before running it with Singularity.

Containers that lack Singularity documentation have not been tested with Singularity.

Since all NGC containers are optimized for GPU acceleration, they will always be executed with the --nv Singularity option, to enable GPU support within the container.

We also need to submit a job requesting a GPU to run GPU-enabled containers. For instance:

$ srun -p gpu -c 4 --gres gpu:1 --pty bash\n

This will start an interactive shell on a GPU node, with 4 CPU cores and 1 GPU.

The NAMD container that was pulled just before can now be started with the following commands. We start by creating a temporary directory to hold the execution results, and start the container using this as the current directory:

$ mkdir /tmp/namd_test\n$ singularity shell --nv --pwd /tmp/namd_test $GROUP_HOME/simg/namd-2.12-171025.simg\nSingularity: Invoking an interactive shell within container...\n\nSingularity namd-2.12-171025.simg:/tmp/namd_test>\n

From there, we can run a NAMD test to verify that everything is working as expected.

> cp -r /workspace/examples .\n> /opt/namd/namd-multicore +p4 +idlepoll examples/apoa1/apoa1.namd\nCharm++: standalone mode (not using charmrun)\nCharm++> Running in Multicore mode:  4 threads\nCharm++> Using recursive bisection (scheme 3) for topology aware partitions\nConverse/Charm++ Commit ID: v6.8.2\n[...]\nInfo: Built with CUDA version 9000\nDid not find +devices i,j,k,... argument, using all\nPe 1 physical rank 1 will use CUDA device of pe 2\nPe 3 physical rank 3 will use CUDA device of pe 2\nPe 0 physical rank 0 will use CUDA device of pe 2\nPe 2 physical rank 2 binding to CUDA device 0 on sh02-14n13.int: 'TITAN Xp'  Mem: 12196MB  Rev: 6.1\nInfo: NAMD 2.12 for Linux-x86_64-multicore-CUDA\n[...]\nInfo: SIMULATION PARAMETERS:\nInfo: TIMESTEP               1\n[...]\nENERGY:    2000     20247.5090     20325.4554      5719.0088       183.9328        -340639.3103     25366.3986         0.0000         0.0000     46364.9951        -222432.0107       168.6631   -268797.0057   -222054.5175       168.8733          -1129.9509     -1799.6459    921491.4634     -2007.8380     -2007.4145\n\nWRITING EXTENDED SYSTEM TO OUTPUT FILE AT STEP 2000\nWRITING COORDINATES TO OUTPUT FILE AT STEP 2000\nThe last position output (seq=-2) takes 0.001 seconds, 559.844 MB of memory in use\nWRITING VELOCITIES TO OUTPUT FILE AT STEP 2000\nThe last velocity output (seq=-2) takes 0.001 seconds, 559.844 MB of memory in use\n====================================================\n\nWallClock: 17.593451  CPUTime: 17.497925  Memory: 559.843750 MB\n[Partition 0][Node 0] End of program\n

The simulation should take a few seconds to run. You can verify that it correctly executed on a GPU in the output above. When it's done, you can exit the container with:

> exit\n

Because the container can see all the compute node's filesystems, the simulation output will be available in /tmp/namd_test after you exit the container:

$ cd /tmp/namd_test/examples/apoa1/\n$ ls apoa1-out*\napoa1-out.coor  apoa1-out.vel  apoa1-out.xsc\n
"},{"location":"docs/software/containers/apptainer/#building-your-own-containers","title":"Building your own containers","text":"

Building Singularity containers requires root privileges, and as such, cannot be done on Sherlock directly.

If you need to modify existing containers or build your own from scratch, the recommended workflow is to prepare and build your containers on your local Linux machine (it could be a workstation, a laptop or a virtual machine), transfer the resulting container image to Sherlock, and run it there.
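
For illustration (the host alias, file and directory names are all hypothetical), that workflow typically looks like:

# on your local Linux machine, where you have root privileges\n$ sudo singularity build mytool.simg mytool.def\n$ scp mytool.simg <sherlock_login>:simg/\n\n# then, on Sherlock\n$ singularity shell ~/simg/mytool.simg\n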

For complete details about how to build Singularity containers, please refer to the Singularity documentation.

  1. For more information about using modules on Sherlock, please see the software modules documentation.\u00a0\u21a9

"},{"location":"docs/software/containers/singularity/","title":"Singularity","text":"

Singularity is an open source container platform designed to run complex applications on high-performance computing (HPC) clusters in a simple, portable, and reproducible way. It's like Docker, but for HPC systems.

"},{"location":"docs/software/containers/singularity/#why-not-docker","title":"Why not Docker?","text":"

Docker has long been the reference and the most popular container framework in DevOps and Enterprise IT environments, so why not use Docker on Sherlock? Well, for a variety of technical reasons, mostly related to security.

Docker has never been designed nor developed to run in multi-tenants environments, and even less on HPC clusters. Specifically:

  • Docker requires a daemon running as root on all of the compute nodes, which has serious security implications,
  • all authenticated actions (such as login, push ...) are also executed as root, meaning that multiple users can't use those functions on the same node,
  • Docker uses cgroups to isolate containers, as does the Slurm scheduler, which uses cgroups to allocate resources to jobs and enforce limits. Those uses are unfortunately conflicting.
  • but most importantly, allowing users to run Docker containers will give them root privileges inside that container, which will in turn let them access any of the clusters' filesystems as root. This opens the door to user impersonation, inappropriate file tampering or stealing, and is obviously not something that can be allowed on a shared resource.

That last point is certainly the single most important reason why we won't use Docker on Sherlock.

"},{"location":"docs/software/containers/singularity/#why-singularity","title":"Why Singularity?","text":"

Singularity is Docker for HPC systems

Singularity allows running Docker containers natively, and is a perfect replacement for Docker on HPC systems such as Sherlock. That means that existing Docker container can be directly imported and natively run with SIngularity.

Despite Docker's shortcomings on HPC systems, the appeal of containers for scientific computing is undeniable, which is why we provide Singularity on Sherlock. Singularity is an alternative container framework, especially designed to run scientific applications on HPC clusters.

Singularity provides the same functionalities as Docker, without any of the drawbacks listed above. Using a completely different implementation, it doesn't require any privilege to run containers, and allow direct interaction with existing Docker containers.

The main motivation to use Singularity over Docker is the fact that it's been developed with HPC systems in mind, to solve those specific problems:

  • security: a user in the container is the same user as the one running the container, so no privilege escalation possible,
  • ease of deployment: no daemon running as root on each node, a container is simply an executable,
  • no need to mount filesystems or do bind mappings to access devices,
  • ability to run MPI jobs based on containers,
  • and more...
"},{"location":"docs/software/containers/singularity/#more-documentation","title":"More documentation","text":"

The following documentation specifically intended for using Singularity on Sherlock. For more complete documentation about building and running containers with Singularity, please see the Singularity documentation.

"},{"location":"docs/software/containers/singularity/#singularity-on-sherlock","title":"Singularity on Sherlock","text":"

As announced during the SC'18 Supercomputing Conference, Singularity is an integral part of the Sherlock cluster, and Singularity commands can be executed natively on any login or compute node, without the need to load any additional module1.

"},{"location":"docs/software/containers/singularity/#importing-containers","title":"Importing containers","text":"

Pre-built containers can be obtained from a variety of sources. For instance:

  • DockerHub contains containers for various software packages, which can be directly used with Singularity,
  • SingularityHub is a registry for scientific linux containers,
  • the NVIDIA GPU Cloud registry for GPU-optimized containers,
  • many individual projects contain specific instructions for installation via Docker and/or Singularity, and may provide pre-built images in other locations.

To illustrate how Singularity can import and run Docker containers, here's an example how to install and run the OpenFOAM CFD solver using Singularity. OpenFOAM can be quite difficult to install manually, but Singularity makes it very easy.

Interactive or batch usage

This example shows how to use Singularity interactively, but Singularity containers can be run in batch jobs as well.

The first step is to request an interactive shell. Singularity images can be pulled directly on compute nodes, and Singularity uses multiple CPU cores when assembling the image, so requesting multiple cores in your job can make the pull operation faster:

$ srun -c 4 --pty bash\n

We recommend storing Singularity images in $GROUP_HOME, as container images can take significant space in your $HOME directory.

$ mkdir -p $GROUP_HOME/$USER/simg\n$ cd $GROUP_HOME/$USER/simg\n

Then, the OpenFOAM container could be pulled directly from DockerHub by Singularity. This can take a moment to complete:

$ singularity pull docker://openfoam/openfoam6-paraview54\nDocker image path: index.docker.io/openfoam/openfoam6-paraview54:latest\nCache folder set to /scratch/users/kilian/.singularity/docker\nImporting: base Singularity environment\nExploding layer: sha256:1be7f2b886e89a58e59c4e685fcc5905a26ddef3201f290b96f1eff7d778e122.tar.gz\n[...]\nBuilding Singularity image...\nSingularity container built: ./openfoam6-paraview54.simg\nCleaning up...\nDone. Container is at: ./openfoam6-paraview54.simg\n
"},{"location":"docs/software/containers/singularity/#running-containers","title":"Running containers","text":"

Once the image is downloaded, you are ready to run OpenFOAM from the container. The singularity shell command can be used to start the container, and run a shell within that image:

By default on Sherlock, all the filesystems that are available on the compute node will also be available in the container. If you want to start your shell in a specific directory, you can use the --pwd /path/ option. For instance, we'll create a /tmp/openfoam_test/ directory to store our tests results (that will be wiped out at the end of the job), and start the container shell there:

$ mkdir /tmp/openfoam_test\n$ singularity shell --pwd /tmp/openfoam_test openfoam6-paraview54.simg\nSingularity: Invoking an interactive shell within container...\nSingularity openfoam6-paraview54.simg:/tmp/openfoam_test>\n

You're now in the container, as denoted by the shell prompt (Singularity[...].simg:[path]>), which is different from the prompt displayed on the compute node (which usually looks like [login]@[compute_node] [path]$.

OpenFOAM provides a convenience script that can be sourced to make OpenFOAM commands directly accessible and set a few useful environment variables:

> source /opt/openfoam6/etc/bashrc\n

Now, we can run a simple example using OpenFOAM:

> cp -r $FOAM_TUTORIALS/incompressible/simpleFoam/pitzDaily .\n> cd pitzDaily\n> blockMesh\n[...]\nEnd\n\n> simpleFoam\n/*---------------------------------------------------------------------------*\\\n  =========                 |\n  \\\\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox\n   \\\\    /   O peration     | Website:  https://openfoam.org\n    \\\\  /    A nd           | Version:  6\n     \\\\/     M anipulation  |\n\\*---------------------------------------------------------------------------*/\nBuild  : 6-1a0c91b3baa8\nExec   : simpleFoam\nDate   : Oct 05 2018\nTime   : 23:37:30\nHost   : \"sh01-06n33.int\"\nPID    : 14670\nI/O    : uncollated\nCase   : /tmp/openfoam_test/pitzDaily\nnProcs : 1\nsigFpe : Enabling floating point exception trapping (FOAM_SIGFPE).\nfileModificationChecking : Monitoring run-time modified files using timeStampMaster (fileModificationSkew 10)\nallowSystemOperations : Allowing user-supplied system call operations\n\n// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //\nCreate time\n[...]\nSIMPLE solution converged in 288 iterations\n\nstreamLine streamlines write:\n    seeded 10 particles\n    Tracks:10\n    Total samples:11980\n    Writing data to \"/tmp/openfoam_test/pitzDaily/postProcessing/sets/streamlines/288\"\nEnd\n\n>\n

When the simulation is done, you can exit the container with:

> exit\n

Because the container can see all the compute node's filesystems, the simulation output will be available in /tmp/openfoam_test after you exit the container:

$ ls /tmp/openfoam_test/pitzDaily/postProcessing/\nsets\n
"},{"location":"docs/software/containers/singularity/#gpu-enabled-containers","title":"GPU-enabled containers","text":"

Sherlock also supports the use of container images provided by NVIDIA in the NVIDIA GPU Cloud (NGC). This registry provides GPU-accelerated containers for the most popular HPC and deep-learning scientific applications.

GPU support

Containers provided on NGC are only supported on Pascal and Volta architectures (TITAN Xp, Tesla P40, P100 or V100). For GPUs from the previous generations (GTX TITAN Black/X, Tesla K20/K80), things may or may not work.

We recommend making sure to select a supported GPU generation by adding the following directive to your batch script when submitting a job to run GPU-enabled containers from NGC:

#SBATCH -C \"GPU_GEN:PSC|GPU_GEN:VLT\"\n

"},{"location":"docs/software/containers/singularity/#pulling-ngc-images","title":"Pulling NGC images","text":"

As before, we start by requesting an interactive shell with multiple CPU cores, loading the Singularity module and moving the directory where we'll save those images:

$ srun -c 4 --pty bash\n$ cd $GROUP_HOME/simg\n

A GPU is not required for pulling GPU-enabled containers

GPU-enabled containers can be pulled on any node, including nodes without a GPU. But their execution requires a GPU and thus, they need to be executed within a GPU job. See the GPU job section for more information.

To be able to pull an image from NGC, authentication credentials must be set. Users need to register and create an NGC API key, complete details could be found in the NGC Getting Started Guide.

You can then set the following environment variable to allow Singularity to authenticate with NGC:

$ export SINGULARITY_DOCKER_USERNAME='$oauthtoken'\n$ export SINGULARITY_DOCKER_PASSWORD=<NVIDIA NGC API key>\n

Note

The SINGULARITY_DOCKER_USERNAME environment variable must be set to the literal $oauthtoken string, for every user. It should not be replaced by anything else. Only the API key is specific to each user.

Once credentials are set in the environment, container images can be pulled from the NGC registry normally.

The general form of the Singularity command used to pull NGC containers is: $ singularity pull docker://nvcr.io/<registry>/<app:tag>

For example to pull the NAMD NGC container tagged with version 2.12-171025 the corresponding command would be:

$ singularity pull docker://nvcr.io/hpc/namd:2.12-171025\n

After this command has finished, we'll have a Singularity image file in the current directory, named namd-2.12-171025.simg.

"},{"location":"docs/software/containers/singularity/#running-ngc-containers","title":"Running NGC containers","text":"

Instructions about running NGC containers are provided on the NGC website, under each application:

Each application comes with specific running instructions, so we recommend to follow the container's particular guidelines before running it with Singularity.

Containers that lack Singularity documentation have not been tested with Singularity.

Since all NGC containers are optimized for GPU acceleration, they will always be executed with the --nv Singularity option, to enable GPU support within the container.

We also need to submit a job requesting a GPU to run GPU-enabled containers. For instance:

$ srun -p gpu -c 4 --gres gpu:1 --pty bash\n

This will start an interactive shell on a GPU node, with 4 CPU cores and 1 GPU.

The NAMD container that was pulled just before can now be started with the following commands. We start by creating a temporary directory to hold the execution results, and start the container using this as the current directory:

$ mkdir /tmp/namd_test\n$ singularity shell --nv --pwd /tmp/namd_test $GROUP_HOME/simg/namd-2.12-171025.simg\nSingularity: Invoking an interactive shell within container...\n\nSingularity namd-2.12-171025.simg:/tmp/namd_test>\n

From there, we can run a NAMD test to verify that everything is working as expected.

> cp -r /workspace/examples .\n> /opt/namd/namd-multicore +p4 +idlepoll examples/apoa1/apoa1.namd\nCharm++: standalone mode (not using charmrun)\nCharm++> Running in Multicore mode:  4 threads\nCharm++> Using recursive bisection (scheme 3) for topology aware partitions\nConverse/Charm++ Commit ID: v6.8.2\n[...]\nInfo: Built with CUDA version 9000\nDid not find +devices i,j,k,... argument, using all\nPe 1 physical rank 1 will use CUDA device of pe 2\nPe 3 physical rank 3 will use CUDA device of pe 2\nPe 0 physical rank 0 will use CUDA device of pe 2\nPe 2 physical rank 2 binding to CUDA device 0 on sh02-14n13.int: 'TITAN Xp'  Mem: 12196MB  Rev: 6.1\nInfo: NAMD 2.12 for Linux-x86_64-multicore-CUDA\n[...]\nInfo: SIMULATION PARAMETERS:\nInfo: TIMESTEP               1\n[...]\nENERGY:    2000     20247.5090     20325.4554      5719.0088       183.9328        -340639.3103     25366.3986         0.0000         0.0000     46364.9951        -222432.0107       168.6631   -268797.0057   -222054.5175       168.8733          -1129.9509     -1799.6459    921491.4634     -2007.8380     -2007.4145\n\nWRITING EXTENDED SYSTEM TO OUTPUT FILE AT STEP 2000\nWRITING COORDINATES TO OUTPUT FILE AT STEP 2000\nThe last position output (seq=-2) takes 0.001 seconds, 559.844 MB of memory in use\nWRITING VELOCITIES TO OUTPUT FILE AT STEP 2000\nThe last velocity output (seq=-2) takes 0.001 seconds, 559.844 MB of memory in use\n====================================================\n\nWallClock: 17.593451  CPUTime: 17.497925  Memory: 559.843750 MB\n[Partition 0][Node 0] End of program\n

The simulation should take a few seconds to run. You can verify that it correctly executed on a GPU in the output above. When it's done, you can exit the container with:

> exit\n

Because the container can see all the compute node's filesystems, the simulation output will be available in /tmp/named_test after you exit the container:

$ cd /tmp/namd_test/examples/apoa1/\n$ ls apoa1-out*\napoa1-out.coor  apoa1-out.vel  apoa1-out.xsc\n
"},{"location":"docs/software/containers/singularity/#building-your-own-containers","title":"Building your own containers","text":"

Building Singularity containers requires root privileges, and as such, cannot be done on Sherlock directly.

If you need to modify existing containers or build your own from scratch, The recommended workflow is to prepare and build your containers on your local Linux machine (it could either be a workstation, a laptop or a virtual machine), transfer the resulting container image to Sherlock, and run it there.

For complete details about how to build Singularity containers, please refer to the Singularity documentation.

  1. For more information about using modules on Sherlock, please see the software modules documentation.\u00a0\u21a9

"},{"location":"docs/software/using/R/","title":"R","text":""},{"location":"docs/software/using/R/#introduction","title":"Introduction","text":"

R is a programming language and software environment for statistical computing and graphics. It is similar to the S language and environment developed at Bell Laboratories. R provides a wide variety of statistical and graphical techniques and is highly extensible.

"},{"location":"docs/software/using/R/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using R on Sherlock. For more complete documentation about R in general, please see the R documentation.

"},{"location":"docs/software/using/R/#r-on-sherlock","title":"R on Sherlock","text":"

R is available on Sherlock and the corresponding module can be loaded with:

$ ml R\n

For a list of available versions, you can execute ml spider R at the Sherlock prompt, or refer to the Software list page.

"},{"location":"docs/software/using/R/#using-r","title":"Using R","text":"

Once your environment is configured (i.e. when the R module is loaded), R can be started by simply typing R at the shell prompt:

$ R\n\nR version 3.5.1 (2018-07-02) -- \"Feather Spray\"\nCopyright (C) 2018 The R Foundation for Statistical Computing\nPlatform: x86_64-pc-linux-gnu (64-bit)\n[...]\nType 'demo()' for some demos, 'help()' for on-line help, or\n'help.start()' for an HTML browser interface to help.\nType 'q()' to quit R.\n\n>\n

For a listing of command line options:

$ R --help\n
"},{"location":"docs/software/using/R/#running-a-r-script","title":"Running a R script","text":"

There are several ways to launch an R script on the command line, each presenting the script's output differently:

Method | Output
Rscript script.R | displayed on screen, on stdout
R CMD BATCH script.R | redirected to a script.Rout file
R --no-save < script.R | displayed on screen, on stdout
"},{"location":"docs/software/using/R/#submitting-a-r-job","title":"Submitting a R job","text":"

Here's an example R batch script that can be submitted via sbatch. It runs a simple matrix multiplication example, and demonstrates how to feed R code as a HEREDOC to R directly, so no intermediate R script is necessary:

Rtest.sbatch
#!/usr/bin/bash\n#SBATCH --time=00:10:00\n#SBATCH --mem=10G\n#SBATCH --output=Rtest.log\n\n# load the module\nml R\n\n# run R code\nR --no-save << EOF\nset.seed (1)\nm <- 4000\nn <- 4000\nA <- matrix (runif (m*n),m,n)\nsystem.time (B <- crossprod(A))\nEOF\n

You can save this script as Rtest.sbatch and submit it to the scheduler with:

$ sbatch Rtest.sbatch\n

Once the job is done, you should get an Rtest.log file in the current directory, with the following contents:

R version 3.5.1 (2018-07-02) -- \"Feather Spray\"\n[...]\n> set.seed (1)\n> m <- 4000\n> n <- 4000\n> A <- matrix (runif (m*n),m,n)\n> system.time (B <- crossprod(A))\n   user  system elapsed\n  2.649   0.077   2.726\n
"},{"location":"docs/software/using/R/#r-packages","title":"R packages","text":"

R comes with a single package library in $R_HOME/library, which contains the standard and most common packages. This is usually in a system location and is not writable by end-users.

To accommodate individual users' requirements, R provides a way for each user to install packages in the location of their choice. The default directory where users can install their own R packages is $HOME/R/x86_64-pc-linux-gnu-library/<R_version>, where <R_version> depends on the R version that is used. For instance, if you have the R/3.5.1 module loaded, the default R user library path will be $HOME/R/x86_64-pc-linux-gnu-library/3.5.

This directory doesn't exist by default. The first time a user installs a package, R will ask whether they want to use the default location, and will create the directory if so.
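
If you prefer to skip that prompt, a simple approach (sketched here for the R/3.5.1 example above) is to pre-create the default user library directory yourself, so that R finds a writable personal library at startup:

$ mkdir -p ~/R/x86_64-pc-linux-gnu-library/3.5\n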

"},{"location":"docs/software/using/R/#installing-packages","title":"Installing packages","text":"

Install R packages in a standard shell session

Make sure to install your packages in a standard Sherlock shell session, not in an RStudio session.

To install an R package in your personal environment, the first thing to do is load the R module:

$ ml R\n

Then start an R session, and use the install.packages() function at the R prompt. For instance, the following example will install the doParallel package, using the US mirror of the CRAN repository:

$ R\n\nR version 3.5.1 (2018-07-02) -- \"Feather Spray\"\n[...]\n\n> install.packages('doParallel', repos='http://cran.us.r-project.org')\n

It should give the following warning:

Warning in install.packages(\"doParallel\", repos = \"http://cran.us.r-project.org\") :\n  'lib = \"/share/software/user/open/R/3.5.1/lib64/R/library\"' is not writable\nWould you like to use a personal library instead? (yes/No/cancel)\nWould you like to create a personal library\n\u2018~/R/x86_64-pc-linux-gnu-library/3.5\u2019\nto install packages into? (yes/No/cancel) y\n

Answering y twice will make R create a ~/R/x86_64-pc-linux-gnu-library/3.5 directory and instruct it to install future R packages there.

The installation will then proceed:

trying URL 'http://cran.us.r-project.org/src/contrib/doParallel_1.0.14.tar.gz'\nContent type 'application/x-gzip' length 173607 bytes (169 KB)\n==================================================\ndownloaded 169 KB\n\n* installing *source* package \u2018doParallel\u2019 ...\n** package \u2018doParallel\u2019 successfully unpacked and MD5 sums checked\n** R\n** demo\n** inst\n** byte-compile and prepare package for lazy loading\n** help\n*** installing help indices\n** building package indices\n** installing vignettes\n** testing if installed package can be loaded\n* DONE (doParallel)\n\nThe downloaded source packages are in\n        \u2018/tmp/Rtmp0RHrMZ/downloaded_packages\u2019\n>\n

and when it's done, you should be able to load the package within R with:

> library(doParallel)\nLoading required package: foreach\nLoading required package: iterators\nLoading required package: parallel\n>\n
"},{"location":"docs/software/using/R/#installing-large-packages","title":"Installing large packages","text":"

Installing large R packages can sometimes be very time consuming. To speed things up, R can utilize multiple CPUs in parallel when the Ncpus=n option is added to the install.packages() command (where n is the number of CPUs you'd like to use).

For instance, you can get an interactive session with 4 CPU cores with sh_dev:

$ sh_dev -c 4\n$ ml R\n$ R\n> install.packages(\"dplyr\", repos = \"http://cran.us.r-project.org\", Ncpus=4)\n
"},{"location":"docs/software/using/R/#alternative-installation-path","title":"Alternative installation path","text":"

To install R packages in a different location, you'll need to create that directory, and instruct R to install the packages there:

$ mkdir ~/R_libs/\n$ R\n> install.packages('doParallel', repos='http://cran.us.r-project.org', lib=\"~/R_libs\")\n

The installation will proceed normally and the doParallel package will be installed in $HOME/R_libs/.

Specifying the full destination path for each package installation could quickly become tiresome, so to avoid this, you can create a .Renviron file in your $HOME directory, and define your R_libs path there:

$ cat << EOF > $HOME/.Renviron\nR_LIBS=~/R_libs\nEOF\n

With this, whenever R is started, the $HOME/R_libs/ directory will be added to the list of places R will look for packages, and you won't need to specify this installation path when using install.packages() anymore.

Where does R look for packages?

To see the directories where R searches for packages and libraries, you can use the following command in R:

> .libPaths()\n

Sharing R packages

If you'd like to share R packages within your group, you can simply define $R_LIBS to point to a shared directory, such as $GROUP_HOME/R_libs, and have each user in the group use the instructions above to define it in their own environment.
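
As a minimal sketch, assuming you want the shared library to live in $GROUP_HOME/R_libs, each user could run:

$ mkdir -p $GROUP_HOME/R_libs\n$ echo \"R_LIBS=$GROUP_HOME/R_libs\" >> ~/.Renviron\n

Note that the shell expands $GROUP_HOME when the line is written, so the resulting .Renviron contains the full path, not the environment variable.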

"},{"location":"docs/software/using/R/#setting-the-installation-repository","title":"Setting the installation repository","text":"

When installing a package, R needs to know from which repository the package should be downloaded. If it's not specified, it will prompt for it and display a list of available CRAN mirrors.

To avoid setting the CRAN mirror each time you run install.packages(), you can permanently set the mirror by creating a .Rprofile file in your $HOME directory, which R will execute each time it starts.

For instance, adding the following contents to your ~/.Rprofile will make sure that every install.packages() invocation will use the closest CRAN mirror:

## local creates a new, empty environment\n## This avoids polluting the global environment with\n## the object r\nlocal({\n  r = getOption(\"repos\")\n  r[\"CRAN\"] = \"https://cloud.r-project.org/\"\n  options(repos = r)\n})\n

Once this is set, you only need to specify the name of the package to install, and R will use the mirror you defined automatically:

> install.packages(\"doParallel\")\n[...]\ntrying URL 'https://cloud.r-project.org/src/contrib/doParallel_1.0.14.tar.gz'\nContent type 'application/x-gzip' length 173607 bytes (169 KB)\n==================================================\ndownloaded 169 KB\n
"},{"location":"docs/software/using/R/#installing-packages-from-github","title":"Installing packages from GitHub","text":"

R packages can be directly installed from GitHub using the devtools package. devtools needs to be installed first, with:

> install.packages(\"devtools\")\n

You can then install an R package directly from its GitHub repository. For instance, to install dplyr from tidyverse/dplyr:

> library(devtools)\n> install_github(\"tidyverse/dplyr\")\n
"},{"location":"docs/software/using/R/#package-dependencies","title":"Package dependencies","text":"

Sometimes when installing R packages, other software is needed for the installation and/or compilation. For instance, when trying to install the sf package, you may encounter the following error messages:

> install.packages(\"sf\")\n[...]\nConfiguration failed because libudunits2.so was not found. Try installing:...\n[...]\nconfigure: error: gdal-config not found or not executable.\n

This is because sf needs a few dependencies, like udunits and gdal in order to compile and install successfully. Fortunately those dependencies are already available as modules on Sherlock.

Whenever you see \"not found\" errors, you may want to try searching the modules inventory with module spider:

$ module spider udunits\n\n----------------------------------------------------------------------------\n  udunits: udunits/2.2.26\n----------------------------------------------------------------------------\n    Description:\n      The UDUNITS package from Unidata is a C-based package for the\n      programmatic handling of units of physical quantities.\n\n\n    You will need to load all module(s) on any one of the lines below before\n    the \"udunits/2.2.26\" module is available to load.\n\n      physics\n

So for sf, in order to load the dependencies, exit R, load the udunits and gdal modules, and try installing sf again:

$ ml load physics udunits gdal geos\n$ ml R/4.3.2\n$ R\n> install.packages(\"sf\")\n

Getting dependencies right can be a matter of trial and error: you may have to load R, install packages, search modules, load modules, install packages again, and so forth. Fortunately, R packages only need to be installed once, and many R package dependencies are already available as modules on Sherlock; you just need to search for them with module spider and load them.

And in case you're stuck, you can of course always send us an email and we'll be happy to assist.

"},{"location":"docs/software/using/R/#updating-packages","title":"Updating Packages","text":"

To upgrade R packages, you can use the update.packages() function within a R session.

For instance, to update the doParallel package:

> update.packages('doParallel')\n

When the package name is omitted, update.packages() will try to update all the packages that are installed, which is the most efficient way to ensure that all the packages in your local R library are up to date.
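
For instance, a non-interactive way to do this from the shell (the ask = FALSE option skips the per-package confirmation prompts):

$ ml R\n$ Rscript -e 'update.packages(ask = FALSE)'\n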

Centrally installed packages cannot be updated

Note that attempting to update centrally installed packages will fail. You will have to use install.packages() to install your own version of the packages in your $HOME directory instead.

"},{"location":"docs/software/using/R/#removing-packages","title":"Removing packages","text":"

To remove a package from your local R library, you can use the remove.packages() function. For instance:

> remove.packages('doParallel')\n
"},{"location":"docs/software/using/R/#examples","title":"Examples","text":""},{"location":"docs/software/using/R/#installing-devtools","title":"Installing devtools","text":"

devtools is a package that provides R functions that simplify many common tasks. While its core functionality revolves around package development, devtools can also be used to install packages, particularly those on GitHub.

Installing devtools is somewhat memory-intensive and has several dependencies. The following example shows how to run an interactive session with 4 CPUs, load the modules for the necessary dependencies, and install devtools for R version 4.2.0.

# Launch interactive dev session with 4 CPUs\n\n$ sh_dev -c 4\n\n# Load the required modules\n\n$ ml purge\n$ ml R/4.2.0\n$ ml system harfbuzz fribidi\n$ ml cmake libgit2\n$ ml openssl\n\n# Launch R and install devtools\n\n$ R\n> install.packages(\"devtools\", repos = \"http://cran.us.r-project.org\", Ncpus=4)\n
"},{"location":"docs/software/using/R/#single-node","title":"Single node","text":"

R has a couple of powerful and easy-to-use tools to parallelize your R jobs. doParallel is one of them. If the doParallel package is not installed in your environment yet, you can install it in a few easy steps.

Here is a quick doParallel example that uses one node and 16 cores on Sherlock (more nodes or CPU cores can be requested, as needed).

Save the two scripts below in a directory on Sherlock:

doParallel_test.R / doParallel_test.sbatch
# Example doParallel script\n\nif(!require(doParallel)) install.packages(\"doParallel\")\nlibrary(doParallel)\n\n# use the environment variable SLURM_NTASKS_PER_NODE to set\n# the number of cores to use (Sys.getenv() returns a string,\n# so it must be converted to a number)\nregisterDoParallel(cores=as.numeric(Sys.getenv(\"SLURM_NTASKS_PER_NODE\")))\n\n# bootstrap iteration example\nx <- iris[which(iris[,5] != \"setosa\"), c(1,5)]\niterations <- 10000  # number of iterations to run\n\n# parallel loop\n# note the '%dopar%' instruction\nparallel_time <- system.time({\n  r <- foreach(icount(iterations), .combine=cbind) %dopar% {\n    ind <- sample(100, 100, replace=TRUE)\n    result1 <- glm(x[ind,2]~x[ind,1], family=binomial(logit))\n    coefficients(result1)\n  }\n})[3]\n\n# show the number of parallel workers being used\ngetDoParWorkers()\n\n# show the elapsed time\nparallel_time\n
#!/bin/bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=16\n#SBATCH --output=doParallel_test.log\n\n# --ntasks-per-node will be used in doParallel_test.R to specify the number\n# of cores to use on the machine.\n\n# load modules\nml R/3.5.1\n\n# execute script\nRscript doParallel_test.R\n

And then submit the job with:

$ sbatch doParallel_test.sbatch\n

Once the job has completed, the output file should contain something like this:

$ cat doParallel_test.log\n[1] 16\nelapsed\n  3.551\n

Bonus points: observe the scalability of the doParallel loop by submitting the same script using a varying number of CPU cores:

$ for i in 2 4 8 16; do\n    sbatch --out=doP_${i}.out --ntasks-per-node=$i doParallel_test.sbatch\ndone\n

When the jobs are done:

$ for i in 2 4 8 16; do\n    printf \"%2i cores: %4.1fs\\n\" $i $(tail -n1 doP_$i.out)\ndone\n 2 cores: 13.6s\n 4 cores:  7.8s\n 8 cores:  4.9s\n16 cores:  3.6s\n
"},{"location":"docs/software/using/R/#multiple-nodes","title":"Multiple nodes","text":"

To distribute parallel R tasks on multiple nodes, you can use the Rmpi package, which provides MPI bindings for R.

To install the Rmpi package, a module providing MPI library must first be loaded. For instance:

$ ml openmpi R\n$ R\n> install.packages(\"Rmpi\")\n

Once the package is installed, the following scripts demonstrate a very basic Rmpi example.

Rmpi-test.R / Rmpi-test.sbatch
# Example Rmpi script\n\nif (!require(\"Rmpi\")) install.packages(\"Rmpi\")\nlibrary(Rmpi)\n\n# initialize an Rmpi environment\nns <- mpi.universe.size() - 1\nmpi.spawn.Rslaves(nslaves=ns, needlog=TRUE)\n\n# send these commands to the slaves\nmpi.bcast.cmd( id <- mpi.comm.rank() )\nmpi.bcast.cmd( ns <- mpi.comm.size() )\nmpi.bcast.cmd( host <- mpi.get.processor.name() )\n\n# all slaves execute this command\nmpi.remote.exec(paste(\"I am\", id, \"of\", ns, \"running on\", host))\n\n# close down the Rmpi environment\nmpi.close.Rslaves(dellog = FALSE)\nmpi.exit()\n
#!/bin/bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks=4\n#SBATCH --output=Rmpi-test.log\n\n## load modules\n# openmpi is not loaded by default with R, so it must be loaded explicitly\nml R openmpi\n\n## run script\n# we use '-np 1' since Rmpi does its own task management\nmpirun -np 1 Rscript Rmpi-test.R\n

You can save those scripts as Rmpi-test.R and Rmpi-test.sbatch and then submit your job with:

$ sbatch Rmpi-test.sbatch\n

When the job is done, its output should look like this:

$ cat Rmpi-test.log\n        3 slaves are spawned successfully. 0 failed.\nmaster (rank 0, comm 1) of size 4 is running on: sh-06-33\nslave1 (rank 1, comm 1) of size 4 is running on: sh-06-33\nslave2 (rank 2, comm 1) of size 4 is running on: sh-06-33\nslave3 (rank 3, comm 1) of size 4 is running on: sh-06-34\n$slave1\n[1] \"I am 1 of 4 running on sh-06-33\"\n\n$slave2\n[1] \"I am 2 of 4 running on sh-06-33\"\n\n$slave3\n[1] \"I am 3 of 4 running on sh-06-34\"\n\n[1] 1\n[1] \"Detaching Rmpi. Rmpi cannot be used unless relaunching R.\"\n
"},{"location":"docs/software/using/R/#gpus","title":"GPUs","text":"

Here's a quick example that compares running a matrix multiplication on a CPU and on a GPU using R. It requires the gpuR R package, and a job submitted to a GPU node.

gpuR-test.R / gpuR-test.sbatch
# Example gpuR script\n\nif (!require(\"gpuR\")) install.packages(\"gpuR\")\nlibrary(gpuR)\n\nprint(\"CPU times\")\nfor(i in seq(1:7)) {\n    ORDER = 64*(2^i)\n    A = matrix(rnorm(ORDER^2), nrow=ORDER)\n    B = matrix(rnorm(ORDER^2), nrow=ORDER)\n    print(paste(i, sprintf(\"%5.2f\", system.time({C = A %*% B})[3])))\n}\n\nprint(\"GPU times\")\nfor(i in seq(1:7)) {\n    ORDER = 64*(2^i)\n    A = matrix(rnorm(ORDER^2), nrow=ORDER)\n    B = matrix(rnorm(ORDER^2), nrow=ORDER)\n    gpuA = gpuMatrix(A, type=\"double\")\n    gpuB = gpuMatrix(B, type=\"double\")\n    print(paste(i, sprintf(\"%5.2f\", system.time({gpuC = gpuA %*% gpuB})[3])))\n}\n
#!/bin/bash\n\n#SBATCH --partition gpu\n#SBATCH --mem 8GB\n#SBATCH --gres gpu:1\n#SBATCH --output=gpuR-test.log\n\n## load modules\n# cuda is not loaded by default with R, so it must be loaded explicitly\nml R cuda\n\nRscript gpuR-test.R\n

After submitting the job with sbatch gpuR-test.sbatch, the output file should contain something like this:

[1] \"CPU times\"\n[1] \"1  0.00\"\n[1] \"2  0.00\"\n[1] \"3  0.02\"\n[1] \"4  0.13\"\n[1] \"5  0.97\"\n[1] \"6  7.56\"\n[1] \"7 60.47\"\n\n[1] \"GPU times\"\n[1] \"1  0.10\"\n[1] \"2  0.04\"\n[1] \"3  0.02\"\n[1] \"4  0.07\"\n[1] \"5  0.39\"\n[1] \"6  2.04\"\n[1] \"7 11.59\"\n

which shows a decent speedup for running on a GPU for the largest matrix sizes.

"},{"location":"docs/software/using/anaconda/","title":"Anaconda","text":""},{"location":"docs/software/using/anaconda/#introduction","title":"Introduction","text":"

Anaconda is a Python/R distribution that aims to simplify package management and deployment for scientific computing. Although it can have merits on individual computers, it's often counter-productive on shared HPC systems like Sherlock.

Avoid using Anaconda on Sherlock

We recommend NOT using Anaconda on Sherlock, and instead consider other options like virtual environments or containers.

"},{"location":"docs/software/using/anaconda/#why-anaconda-should-be-avoided-on-sherlock","title":"Why Anaconda should be avoided on Sherlock","text":"

Anaconda is widely used in several scientific domains like data science, AI/ML, or bio-informatics, and is often listed in software documentation as the recommended (if not the only) way to install it.

It is a useful solution for simplifying the management of Python and scientific libraries on a personal computer. However, on highly-specialized HPC systems like Sherlock, management of these libraries and dependencies should be done by Stanford Research Computing staff, to ensure compatibility and optimal performance on the cluster hardware.

For instance:

  • Anaconda very often installs software (compilers, scientific libraries, etc.) that already exists on Sherlock as modules, and does so in a sub-optimal fashion, by installing non-optimized versions and configurations,
  • it installs binaries which are not optimized for the processor architectures on Sherlock,
  • it makes incorrect assumptions about the location of various system libraries,
  • it installs software in $HOME by default, where it writes large numbers of files. A single Anaconda installation can easily fill up your $HOME directory quota and make things difficult to manage,
  • Anaconda installations can't easily be relocated,
  • it modifies your $HOME/.bashrc file, which can easily cause conflicts and slow things down when you log in.

Worse, a Conda recipe can force the installation of R (even though it's already available on Sherlock). That installation won't perform nearly as well as the version we provide as a module (which uses optimized libraries), or may not work at all: jobs launched with it may crash and end up wasting both computing resources and your time.

Installation issues

If you absolutely need to install anaconda/miniconda, please note that because of the large number of files that the installer will try to open, this will likely fail on a login node. So make sure to run the installation on a compute node, for instance using the sh_dev command.

"},{"location":"docs/software/using/anaconda/#what-to-do-instead","title":"What to do instead","text":""},{"location":"docs/software/using/anaconda/#use-a-virtual-environment","title":"Use a virtual environment","text":"

Instead of using Anaconda for your project, even when the installation instructions of the software you want to install rely on it, you can use a virtual environment.

A virtual environment offers all the functionality you need to use Python on Sherlock. You can convert Anaconda instructions and use a virtual environment instead, by following these steps:

  1. list the dependencies (also called requirements) of the application you want to use:
    • check if there is a requirements.txt file in the Git repository or in the software sources,
    • or, check the install_requires variable in the setup.py file, which lists the requirements.
  2. find which dependencies are Python modules and which are libraries provided by Anaconda. For example, CUDA and CuDNN are libraries that Anaconda can install, but which should not be re-installed as they are already available as modules on Sherlock,
  3. remove from the list of dependencies everything which is not a Python module (e.g. cudatoolkit and cudnn),
  4. create a virtual environment to install your dependencies, as sketched below.
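
As a minimal sketch of those steps — the Python module version, environment name and requirements.txt file are just examples, to adapt to your project:

$ ml python/3.9.0\n$ python3 -m venv $HOME/myenv\n$ source $HOME/myenv/bin/activate\n(myenv) $ pip install -r requirements.txt\n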

And that's it: your software should run, without Anaconda. If you have any issues, please don't hesitate to contact us.

"},{"location":"docs/software/using/anaconda/#use-a-container","title":"Use a container","text":"

In some situations, the complexity of a program's dependencies requires the use of a solution where you can control the entire software environment. In these situations, we recommend using a container.

Tip

Existing Docker images can easily be converted into Apptainer/Singularity images.
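
For instance, assuming you want a containerized Python (the image name here is just an example), pulling and converting a Docker image can be as simple as:

$ singularity pull docker://python:3.11\n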

The only potential downside of using containers is their size and the associated storage usage. But if your research group plans on using several container images, it could be useful to collect them all in a single location (like $GROUP_HOME) to avoid duplication.
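
A quick sketch of that layout, using a hypothetical containers/ directory and image name:

$ mkdir -p $GROUP_HOME/containers\n$ mv mycontainer.sif $GROUP_HOME/containers/\n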

"},{"location":"docs/software/using/clustershell/","title":"ClusterShell","text":""},{"location":"docs/software/using/clustershell/#introduction","title":"Introduction","text":"

ClusterShell is a command-line tool and library that helps run arbitrary commands in parallel across multiple servers. On Sherlock, it provides an easy way to run commands on the nodes your jobs are running on, and to collect back their output. The two most useful commands it provides are cluset, which can manipulate lists of node names, and clush, which can run commands on multiple nodes at once.

"},{"location":"docs/software/using/clustershell/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using ClusterShell on Sherlock. For more complete documentation about ClusterShell in general, please see the ClusterShell documentation.

The ClusterShell library can also be directly integrated into your Python scripts, to add a wide range of functionality. See the ClusterShell Python API documentation for reference.

"},{"location":"docs/software/using/clustershell/#clustershell-on-sherlock","title":"ClusterShell on Sherlock","text":"

ClusterShell is available on Sherlock and the corresponding module can be loaded with:

$ ml system py-clustershell\n
"},{"location":"docs/software/using/clustershell/#cluset","title":"cluset","text":"

The cluset command can be used to easily manipulate lists of node names, and to expand, fold, or count them:

$ cluset --expand sh03-01n[01-06]\nsh03-01n01 sh03-01n02 sh03-01n03 sh03-01n04 sh03-01n05 sh03-01n06\n\n$ cluset --count sh03-01n[01-06]\n6\n\n$ cluset --fold sh03-01n01 sh03-01n02 sh03-01n03 sh03-01n06\nsh03-01n[01-03,06]\n
"},{"location":"docs/software/using/clustershell/#clush","title":"clush","text":"

The clush command uses the same node list syntax to allow running the same commands simultaneously on those nodes. clush uses SSH to connect to each of these nodes.

Warning

You can only SSH to nodes where your jobs are running, and as a consequence, clush will only work on those nodes.

For instance, to check the load on multiple compute nodes at once:

$ clush -w sh03-01n[01-03] cat /proc/loadavg\nsh03-01n01: 19.48 14.43 11.76 22/731 22897\nsh03-01n02: 13.20 13.29 13.64 14/831 1163\nsh03-01n03: 11.60 11.48 11.82 18/893 23945\n

Gathering identical output

Using the -b option will regroup similar output lines to make large outputs easier to read. By default, the output of each node will be presented separately.

For instance, without -b:

$ clush -w sh03-01n[01-03] echo ok\nsh03-01n02: ok\nsh03-01n03: ok\nsh03-01n01: ok\n

With -b:

$ clush -bw sh03-01n[01-03] echo ok\n---------------\nsh03-01n[01-03] (3)\n---------------\nok\n
"},{"location":"docs/software/using/clustershell/#slurm-integration","title":"Slurm integration","text":"

On Sherlock, ClusterShell is also tightly integrated with the job scheduler, and can directly provide information about a user's jobs and the nodes they're running on. You can use the following groups to get specific node lists:

group name | short name | action | example
@user: | @u: | list nodes where user has jobs running | cluset -f @user:$USER
@job: | @j: | list nodes where job is running | cluset -f @job:123456
@nodestate: | @node:, @n: | list nodes in given state | cluset -f @nodestate:idle
@partition: | @part:, @p: | list nodes in given partition | cluset -f @partition:gpu

For instance, to get the list of nodes where job 123456 is running:

$ cluset -f @job:123456\n
"},{"location":"docs/software/using/clustershell/#examples","title":"Examples","text":""},{"location":"docs/software/using/clustershell/#job-information","title":"Job information","text":"

For instance, if job 1988522 from user kilian is running on nodes sh02-01n[59-60], squeue would display this:

$ squeue -u kilian\n       JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n     1988522    normal interact   kilian  R       1:30      2 sh02-01n[59-60]\n     1988523    normal interact   kilian  R       1:28      2 sh02-01n[61-62]\n

With ClusterShell, you could get:

  • the list of node names where user kilian has jobs running:

    $ cluset -f @user:kilian\nsh02-01n[59-62]\n
  • the nodes where job 1988522 is running, in an expanded form:

    $ cluset -e @job:1988522\nsh02-01n59 sh02-01n60\n
"},{"location":"docs/software/using/clustershell/#node-states","title":"Node states","text":"

You can also use those bindings to get lists of nodes in a particular state, in a given partition. For instance, to list the nodes that are in \"mixed\" state in the dev partition, you can request the intersection between the @nodestate:mixed and @partition:dev node lists:

$ cluset -f @nodestate:mixed -i @partition:dev\nsh02-01n[57-58]\n
"},{"location":"docs/software/using/clustershell/#local-storage","title":"Local storage","text":"

To get a list of files in $L_SCRATCH on all the nodes that are part of job 1988522:

$ clush -w@j:1988522 tree $L_SCRATCH\nsh02-01n59: /lscratch/kilian\nsh02-01n59: \u251c\u2500\u2500 1988522\nsh02-01n59: \u2502\u00a0\u00a0 \u2514\u2500\u2500 foo\nsh02-01n59: \u2502\u00a0\u00a0     \u2514\u2500\u2500 bar\nsh02-01n59: \u2514\u2500\u2500 1993608\nsh02-01n59:\nsh02-01n59: 3 directories, 1 file\nsh02-01n60: /lscratch/kilian\nsh02-01n60: \u2514\u2500\u2500 1988522\nsh02-01n60:\nsh02-01n60: 1 directory, 0 files\n
"},{"location":"docs/software/using/clustershell/#process-tree","title":"Process tree","text":"

To display your process tree across all the nodes your jobs are running on:

$ clush -w @u:$USER pstree -au $USER\nsh02-09n71: mpiBench\nsh02-09n71:   `-3*[{mpiBench}]\nsh02-09n71: mpiBench\nsh02-09n71:   `-3*[{mpiBench}]\nsh02-09n71: mpiBench\nsh02-09n71:   `-3*[{mpiBench}]\nsh02-09n71: mpiBench\nsh02-09n71:   `-3*[{mpiBench}]\nsh02-10n01: mpiBench\nsh02-10n01:   `-3*[{mpiBench}]\nsh02-10n01: mpiBench\nsh02-10n01:   `-3*[{mpiBench}]\nsh02-10n01: mpiBench\nsh02-10n01:   `-3*[{mpiBench}]\nsh02-10n01: mpiBench\nsh02-10n01:   `-3*[{mpiBench}]\n
"},{"location":"docs/software/using/clustershell/#cpu-usage","title":"CPU usage","text":"

To get the CPU and memory usage of your processes in job 2003264:

$ clush -w @j:2003264 ps -u$USER -o%cpu,rss,cmd\nsh03-07n12: %CPU   RSS CMD\nsh03-07n12:  0.0  4780 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-07n12:  0.0  4784 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-07n12:  0.0  4784 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-07n12:  0.0  4780 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n06: %CPU   RSS CMD\nsh03-06n06:  0.0 59596 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n06:  0.0 59576 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n06:  0.0 59580 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n06:  0.0 59588 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n05: %CPU   RSS CMD\nsh03-06n05:  0.0  7360 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n05:  0.0  7328 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n05:  0.0  7344 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n05:  0.0  7340 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n11: %CPU   RSS CMD\nsh03-06n11: 17.0 59604 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n11: 17.0 59588 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n11: 17.0 59592 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\nsh03-06n11: 17.0 59580 /home/users/kilian/benchs/MPI/mpiBench/mpiBench -i 1000000\n
"},{"location":"docs/software/using/clustershell/#gpu-usage","title":"GPU usage","text":"

To show what's running on all the GPUs on the nodes associated with job 123456:

$ clush -bw @job:123456 nvidia-smi --format=csv --query-compute-apps=process_name,utilization.memory\nsh03-12n01: /share/software/user/open/python/3.6.1/bin/python3.6, 15832 MiB\nsh02-12n04: /share/software/user/open/python/3.6.1/bin/python3.6, 15943 MiB\n
"},{"location":"docs/software/using/julia/","title":"Julia","text":""},{"location":"docs/software/using/julia/#introduction","title":"Introduction","text":"

Julia is a high-level, general-purpose, dynamic programming language originally designed to address the needs of high-performance numerical analysis and computational science, without the typical need for separate compilation to be fast. It is also usable for client and server web use, low-level systems programming, or as a specification language. Julia aims to create an unprecedented combination of ease-of-use, power, and efficiency in a single language.

"},{"location":"docs/software/using/julia/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using Julia on Sherlock. For more complete documentation about Julia in general, please see the Julia documentation.

"},{"location":"docs/software/using/julia/#julia-on-sherlock","title":"Julia on Sherlock","text":"

Julia is available on Sherlock and the corresponding module can be loaded with:

$ ml julia\n

For a list of available versions, you can execute ml spider julia at the Sherlock prompt, or refer to the Software list page.

"},{"location":"docs/software/using/julia/#using-julia","title":"Using Julia","text":"

Once your environment is configured (i.e. when the julia module is loaded), Julia can be started by simply typing julia at the shell prompt:

$ julia\n\n_\n   _       _ _(_)_     |  Documentation: https://docs.julialang.org\n  (_)     | (_) (_)    |\n   _ _   _| |_  __ _   |  Type \"?\" for help, \"]?\" for Pkg help.\n  | | | | | | |/ _` |  |\n  | | |_| | | | (_| |  |  Version 1.0.0 (2018-08-08)\n _/ |\\__'_|_|_|\\__'_|  |  Official https://julialang.org/ release\n|__/                   |\n\njulia>\n

For a listing of command line options:

$ julia --help\n\njulia [switches] -- [programfile] [args...]\n -v, --version             Display version information\n -h, --help                Print this message\n\n -J, --sysimage <file>     Start up with the given system image file\n -H, --home <dir>          Set location of `julia` executable\n --startup-file={yes|no}   Load `~/.julia/config/startup.jl`\n --handle-signals={yes|no} Enable or disable Julia's default signal handlers\n --sysimage-native-code={yes|no}\n                           Use native code from system image if available\n --compiled-modules={yes|no}\n                           Enable or disable incremental precompilation of modules\n\n -e, --eval <expr>         Evaluate <expr>\n -E, --print <expr>        Evaluate <expr> and display the result\n -L, --load <file>         Load <file> immediately on all processors\n\n -p, --procs {N|auto}      Integer value N launches N additional local worker processes\n                           \"auto\" launches as many workers as the number\n                           of local CPU threads (logical cores)\n --machine-file <file>     Run processes on hosts listed in <file>\n\n -i                        Interactive mode; REPL runs and isinteractive() is true\n -q, --quiet               Quiet startup: no banner, suppress REPL warnings\n
"},{"location":"docs/software/using/julia/#running-a-julia-script","title":"Running a Julia script","text":"

A Julia program is easy to run on the command line outside of its interactive mode.

Here is an example where we create a simple Hello World program and launch it with Julia:

$ echo 'println(\"hello world\")' > helloworld.jl\n

That script can now simply be executed by calling julia <script_name>:

$ julia helloworld.jl\nhello world\n
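
For quick one-liners, you can also pass an expression directly to the interpreter with the -e option shown in the listing above, without creating a script file:

$ julia -e 'println(\"hello world\")'\nhello world\n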
"},{"location":"docs/software/using/julia/#submitting-a-julia-job","title":"Submitting a Julia job","text":"

Here's an example Julia sbatch script that can be submitted via sbatch:

julia_test.sbatch
#!/bin/bash\n\n#SBATCH --time=00:10:00\n#SBATCH --mem=4G\n#SBATCH --output=julia_test.log\n\n# load the module\nml julia\n\n# run the Julia application\njulia helloworld.jl\n

You can save this script as julia_test.sbatch and submit it to the scheduler with:

$ sbatch julia_test.sbatch\n

Once the job is done, you should get a julia_test.log file in the current directory, with the following contents:

$ cat julia_test.log\nhello world\n
"},{"location":"docs/software/using/julia/#julia-packages","title":"Julia packages","text":"

Julia provides an ever-growing list of packages that can be used to install add-on functionality to your Julia code.

Installing packages with Julia is very simple. Julia includes a package module in its base installation that handles installing, updating, and removing packages.

First import the Pkg module:

julia> import Pkg\njulia> Pkg.status()\n    Status `~/.julia/environments/v1.0/Project.toml`\n

Julia packages only need to be installed once

You only need to install Julia packages once on Sherlock. Since filesystems are shared, packages installed on one node will immediately be available on all nodes of the cluster.

"},{"location":"docs/software/using/julia/#installing-packages","title":"Installing packages","text":"

You can first check the status of packages installed on Julia using the status function of the Pkg module:

julia> Pkg.status()\nNo packages installed.\n

You can then add packages using the add function of the Pkg module:

julia> Pkg.add(\"Distributions\")\nINFO: Cloning cache of Distributions from git://github.com/JuliaStats/Distributions.jl.git\nINFO: Cloning cache of NumericExtensions from git://github.com/lindahua/NumericExtensions.jl.git\nINFO: Cloning cache of Stats from git://github.com/JuliaStats/Stats.jl.git\nINFO: Installing Distributions v0.2.7\nINFO: Installing NumericExtensions v0.2.17\nINFO: Installing Stats v0.2.6\nINFO: REQUIRE updated.\n

Using the status function again, you can see that the package and its dependencies have been installed:

julia> Pkg.status()\nRequired packages:\n - Distributions                 0.2.7\nAdditional packages:\n - NumericExtensions             0.2.17\n - Stats                         0.2.6\n
"},{"location":"docs/software/using/julia/#updating-packages","title":"Updating Packages","text":"

The update function of the Pkg module can update all packages installed:

julia> Pkg.update()\nINFO: Updating METADATA...\nINFO: Computing changes...\nINFO: Upgrading Distributions: v0.2.8 => v0.2.10\nINFO: Upgrading Stats: v0.2.7 => v0.2.8\n
"},{"location":"docs/software/using/julia/#removing-packages","title":"Removing packages","text":"

The remove function of the Pkg module can remove any packages installed as well:

julia> Pkg.rm(\"Distributions\")\nINFO: Removing Distributions v0.2.7\nINFO: Removing Stats v0.2.6\nINFO: Removing NumericExtensions v0.2.17\nINFO: REQUIRE updated.\n\njulia> Pkg.status()\nRequired packages:\n - SHA                           0.3.2\n\njulia> Pkg.rm(\"SHA\")\nINFO: Removing SHA v0.3.2\nINFO: REQUIRE updated.\n\njulia> Pkg.status()\nNo packages installed.\n
"},{"location":"docs/software/using/julia/#examples","title":"Examples","text":""},{"location":"docs/software/using/julia/#parallel-job","title":"Parallel job","text":"

Julia can natively spawn parallel workers across multiple compute nodes, without using MPI. There are two main modes of operation:

  1. ClusterManager: in this mode, you can spawn workers from within the Julia interpreter, and each worker will actually submit jobs to the scheduler, executing instructions within those jobs.

  2. using the --machine-file option: here, you submit a SLURM job and run the Julia interpreter in parallel mode within the job's resources.

The second mode is easier to use, and more convenient, since you have all your resources available and ready to use when the job starts. In mode 1, you'll need to wait for jobs to be dispatched and executed inside Julia.

Here is a quick example on how to use the --machine-file option on Sherlock.

Given the following Julia script (julia_parallel_test.jl), which prints a line with the process id and the name of the node it's executing on, in parallel:

julia_parallel_test.jl
using Distributed\n@everywhere println(\"process: $(myid()) on host $(gethostname())\")\n

You can submit the following job:

julia_test.sbatch
#!/bin/bash\n#SBATCH --nodes 2\n#SBATCH --ntasks-per-node 4\n#SBATCH --time 5:0\n\nml julia\njulia --machine-file <(srun hostname -s)  ./julia_parallel_test.jl\n

Save as julia_test.sbatch, and then:

$ sbatch  julia_test.sbatch\n

It will:

  1. Request 2 nodes, 4 tasks per node (8 tasks total)
  2. Load the julia module
  3. Run Julia in parallel with a machine file that is automatically generated, listing the nodes that are assigned to your job.

It should output something like this in your job's output file:

process: 1 on host sh-06-33.int\n      From worker 2:    process: 2 on host sh-06-33.int\n      From worker 3:    process: 3 on host sh-06-34.int\n      From worker 5:    process: 5 on host sh-06-33.int\n      From worker 4:    process: 4 on host sh-06-33.int\n      From worker 6:    process: 6 on host sh-06-33.int\n      From worker 8:    process: 8 on host sh-06-34.int\n      From worker 9:    process: 9 on host sh-06-34.int\n      From worker 7:    process: 7 on host sh-06-34.int\n
"},{"location":"docs/software/using/mariadb/","title":"MariaDB","text":""},{"location":"docs/software/using/mariadb/#introduction","title":"Introduction","text":"

MariaDB is a community-developed fork of the MySQL relational database management system. It is completely compatible with MySQL and can be used as a drop-in replacement in the vast majority of cases.

"},{"location":"docs/software/using/mariadb/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using MariaDB on Sherlock. For more complete documentation about MariaDB in general, please see the MariaDB documentation.

"},{"location":"docs/software/using/mariadb/#mariadb-on-sherlock","title":"MariaDB on Sherlock","text":"

We don't provide any centralized database service on Sherlock, but we provide a centralized installation of MariaDB, and each user is welcome to start their own instance of the database server to fit their jobs' needs.

The overall process to run an instance of MariaDB on Sherlock would look like this:

  1. configure and initialize your environment so you can start a database instance under your user account,
  2. start the database server,
  3. run SQL queries from the same node (via a local socket), or from other nodes and/or jobs (via the network).
"},{"location":"docs/software/using/mariadb/#single-node-access","title":"Single-node access","text":"

In that example, the database server and client will run within the same job, on the same compute node.

"},{"location":"docs/software/using/mariadb/#preparation","title":"Preparation","text":"

You first need to let MariaDB know where to store its database, where to log things, and how to allow connections from clients. The commands below only need to be executed once.

For this, you'll need to create a .my.cnf file in your home directory. Assuming you'll want to store your database files in a db/ directory in your $SCRATCH folder, you can run the following commands:

$ export DB_DIR=$SCRATCH/db\n$ mkdir $DB_DIR\n\n$ cat << EOF > ~/.my.cnf\n[mysqld]\ndatadir=$DB_DIR\nsocket=$DB_DIR/mariadb.sock\nuser=$USER\nsymbolic-links=0\nskip-networking\n\n[mysqld_safe]\nlog-error=$DB_DIR/mariadbd.log\npid-file=$DB_DIR/mariadbd.pid\n\n[mysql]\nsocket=$DB_DIR/mariadb.sock\nEOF\n

.my.cnf doesn't support environment variables

Please note that if you edit your ~/.my.cnf file directly in a file editor, without using the HEREDOC syntax above, environment variables such as $DB_DIR, $HOME or $USER won't work: you will need to specify absolute paths explicitly, such as /scratch/users/kilian/db/mariadbd.log.

If you use the HEREDOC syntax, you can verify that the resulting .my.cnf file does actually contain full paths, and not environment variables anymore.

Once you have the .my.cnf file in place, you need to initialize your database with some internal data that MariaDB needs. In the same terminal, run the following commands:

$ ml system mariadb\n$ $MARIADB_DIR/scripts/mysql_install_db --basedir=$MARIADB_DIR  --datadir=$DB_DIR\n
"},{"location":"docs/software/using/mariadb/#start-the-server","title":"Start the server","text":"

You can now start the MariaDB server. For this, first get an allocation on a compute node, note the hostname of the compute node your job has been allocated, load the mariadb module, and then run the mysqld_safe process:

$ srun --pty bash\n$ echo $SLURM_JOB_NODELIST\nsh-01-01\n$ ml system mariadb\n$ mysqld_safe\n180705 18:14:27 mysqld_safe Logging to '/home/users/kilian/db/mysqld.log'.\n180705 18:14:28 mysqld_safe Starting mysqld daemon with databases from /home/users/kilian/db/\n

The mysqld_safe process is blocking, meaning that it will not give the prompt back for as long as the MariaDB server runs.

If it does return on its own, it probably means that something went wrong, and you'll find more information about the issue in the $DB_DIR/mariadbd.log file you defined in ~/.my.cnf.
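
For instance, assuming the $DB_DIR location chosen in the Preparation step, you can inspect that log from another terminal with:

$ tail $SCRATCH/db/mariadbd.log\n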

"},{"location":"docs/software/using/mariadb/#run-queries","title":"Run queries","text":"

You're now ready to run queries against that MariaDB instance, from the same node your job is running on.

From another terminal on Sherlock, connect to your job's compute node (here, it's sh-01-01, as shown above), load the mariadb module, and then run the mysql command: it will open the MariaDB shell, ready to run your SQL queries:

$ ssh sh-01-01\n$ ml system mariadb\n$ mysql\nWelcome to the MariaDB monitor.  Commands end with ; or \\g.\nYour MariaDB connection id is 8\nServer version: 10.2.11-MariaDB Source distribution\n\nCopyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.\n\nType 'help;' or '\\h' for help. Type '\\c' to clear the current input statement.\n\nMariaDB [(none)]>\n

Once you're done with your MariaDB instance, you can just terminate your job, and all the processes will be terminated automatically.

"},{"location":"docs/software/using/mariadb/#multi-node-access","title":"Multi-node access","text":"

In case you need to run a more persistent instance of MariaDB, you can for instance submit a dedicated job to run the server, make it accessible over the network, and run queries from other jobs and/or nodes.

"},{"location":"docs/software/using/mariadb/#enable-network-access","title":"Enable network access","text":"

The preparation steps are pretty similar to the single-node case, except the MariaDB server instance will be accessed over the network rather than through a local socket.

Network access must be secured

When running a networked instance of MariaDB, please keep in mind that any user on Sherlock will be able to connect to the TCP ports that mysqld listens on, and that proper configuration must be done to prevent unauthorized access.

Like in the single-node case, you need to create a ~/.my.cnf file, but without the skip-networking directive.

$ export DB_DIR=$SCRATCH/db\n$ mkdir $DB_DIR\n\n$ cat << EOF > ~/.my.cnf\n[mysqld]\ndatadir=$DB_DIR\nsocket=$DB_DIR/mariadb.sock\nuser=$USER\nsymbolic-links=0\n\n[mysqld_safe]\nlog-error=$DB_DIR/mariadbd.log\npid-file=$DB_DIR/mariadbd.pid\n\n[mysql]\nsocket=$DB_DIR/mariadb.sock\nEOF\n

And then initiate the database:

$ ml system mariadb\n$ $MARIADB_DIR/scripts/mysql_install_db --basedir=$MARIADB_DIR  --datadir=$DB_DIR\n
"},{"location":"docs/software/using/mariadb/#secure-access","title":"Secure access","text":"

We will now set a password for the MariaDB root user to a random string, just for the purpose of preventing unauthorized access, since we won't need it for anything.

We will actually create a MariaDB user with all privileges on the databases, who will be able to connect to this instance from any node. This user will need a real password, though, so please make sure to replace the my-secure-password string below with the actual password of your choice.

Choose a proper password

This password will only be used to access this specific instance of MariaDB. Note that anybody knowing that password will be allowed to connect to your MariaDB instance and modify data in its tables.

  • do NOT literally use my-secure-password
  • do NOT use your SUNet ID password

Once you've chosen your password, you can start the mysqld process on a compute node, like before:

$ srun --pty bash\n$ echo $SLURM_JOB_NODELIST\nsh-01-01\n$ ml system mariadb\n$ mysqld_safe\n

And then, from another terminal, run the following commands to secure access to your MariaDB database.

$ ssh sh-01-01\n$ mysql -u root << EOF\nUPDATE mysql.user SET Password=PASSWORD(RAND()) WHERE User='root';\nDELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');\nDELETE FROM mysql.user WHERE User='';\nDELETE FROM mysql.db WHERE Db='test' OR Db='test_%';\nGRANT ALL PRIVILEGES ON *.* TO '$USER'@'%' IDENTIFIED BY 'my-secure-password' WITH GRANT OPTION;\nFLUSH PRIVILEGES;\nEOF\n

Once you've done that, you're ready to terminate that interactive job, and start a dedicated MariaDB server job.

"},{"location":"docs/software/using/mariadb/#start-mariadb-in-a-job","title":"Start MariaDB in a job","text":"

You can use the following mariadb.sbatch job as a template:

#!/bin/bash\n\n#SBATCH --job-name=mariadb\n#SBATCH --time=8:0:0\n#SBATCH --dependency=singleton\n\nml system mariadb\nmysqld_safe\n

and submit it with:

$ sbatch mariadb.sbatch\n

Concurrent instances will lead to data corruption

An important thing to keep in mind is that having multiple instances of a MariaDB server running at the same time, using the same database files, will certainly lead to catastrophic situations and the corruption of those files.

To prevent this from happening, the --dependency=singleton job submission option will make sure that only one instance of that job (based on its name and user) will run at any given time.

"},{"location":"docs/software/using/mariadb/#connect-to-the-running-instance","title":"Connect to the running instance","text":"

Now, from any node on Sherlock, whether from a login node, an interactive job, or a batch job, using the mysql CLI or any application binding in any language, you should be able to connect to your running MariaDB instance.

First, identify the node your job is running on with squeue:

$ squeue -u $USER -n mariadb\n             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n          21383445    normal  mariadb   kilian  R       0:07      1 sh-01-02\n

and then, point your MariaDB client to that node:

$ ml system mariadb\n$ mysql -h sh-01-02 -p\nEnter password:\nWelcome to the MariaDB monitor.  Commands end with ; or \\g.\nYour MariaDB connection id is 15\nServer version: 10.2.11-MariaDB Source distribution\n\nCopyright (c) 2000, 2017, Oracle, MariaDB Corporation Ab and others.\n\nType 'help;' or '\\h' for help. Type '\\c' to clear the current input statement.\n\nMariaDB [(none)]>\n

That's it! You can now run SQL queries from anywhere on Sherlock to your own MariaDB instance.

"},{"location":"docs/software/using/mariadb/#persistent-db-instances","title":"Persistent DB instances","text":"

SQL data is persistent

All the data you import in your SQL databases will be persistent across jobs, meaning that you can run a MariaDB server job for the day, import data in its database, stop the job, and resubmit the same MariaDB server job the next day: all your data will still be there, as long as the location you've chosen for your database (the $DB_DIR defined in the Preparation steps) is on a persistent storage location.

If you need database access for more than the maximum runtime of a job, you can use the instructions provided to define self-resubmitting recurring jobs and submit long-running database instances.

"},{"location":"docs/software/using/matlab/","title":"Matlab","text":""},{"location":"docs/software/using/matlab/#introduction","title":"Introduction","text":"

MATLAB is a numerical computing environment and proprietary programming language developed by MathWorks.

"},{"location":"docs/software/using/matlab/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using Matlab on Sherlock. For more complete documentation about Matlab in general, please see the official MATLAB documentation.

"},{"location":"docs/software/using/matlab/#matlab-on-sherlock","title":"MATLAB on Sherlock","text":""},{"location":"docs/software/using/matlab/#licensing","title":"Licensing","text":"

MATLAB is a commercial software suite, which is now available at no cost to all Stanford faculty, students, and staff.

Note: a number of free, open-source alternatives exist and can be used in many situations: Octave, R, Julia, or Python are all available on Sherlock, and can often replace MATLAB with good results.

"},{"location":"docs/software/using/matlab/#using-matlab","title":"Using MATLAB","text":"

The MATLAB module can be loaded with:

$ ml load matlab\n

This will load the current default version. For a list of available versions run ml spider matlab at the Sherlock prompt, or refer to the Software list page.

MATLAB can't run on login nodes

Running MATLAB directly on login nodes is not supported and will produce the following message:

-----------------------------------------------------------------------\nWARNING: running MATLAB directly on login nodes is not supported.  Please\nmake sure you request an interactive session on a compute node with \"sh_dev\"\nfor instance) before launching MATLAB interactively.\n-----------------------------------------------------------------------\n
You will need to submit a job or request an interactive session on a compute node before you can start MATLAB.

Once you are on a compute node and your environment is configured (i.e. when the matlab module is loaded), MATLAB can be started by simply typing matlab at the shell prompt.

$ sh_dev\n$ ml load matlab\n$ matlab\nMATLAB is selecting SOFTWARE OPENGL rendering.\n                          < M A T L A B (R) >\n                Copyright 1984-2019 The MathWorks, Inc.\n                R2019a (9.6.0.1072779) 64-bit (glnxa64)\n                             March 8, 2019\n\nTo get started, type doc.\nFor product information, visit www.mathworks.com.\n\n>>\n

For a listing of command line options:

$ matlab -help\n
"},{"location":"docs/software/using/matlab/#running-a-matlab-script","title":"Running a MATLAB script","text":"

There are several ways to launch a MATLAB script on the command line, as documented in the MATLAB documentation:

Method | Output
matlab -nodesktop < script.m | MATLAB will run the code from script.m and display output on stdout
matlab -nodisplay | start MATLAB in CLI mode, without its graphical desktop environment
matlab -nojvm | do not start the JVM1
"},{"location":"docs/software/using/matlab/#matlab-gui","title":"MATLAB GUI","text":"

It's often best to use your laptop or desktop to develop and debug MATLAB code, and to visualize its output. If you do need to use the MATLAB GUI on a large cluster like Sherlock, you will need to enable X11 forwarding in your SSH client.

For instance:

$ ssh -X <YourSUNetID>@login.sherlock.stanford.edu\n

And then, once on Sherlock:

$ sh_dev\n$ ml load matlab\n$ matlab\n

For more info on X11 forwarding, you can refer to this UIT page.

"},{"location":"docs/software/using/matlab/#examples","title":"Examples","text":""},{"location":"docs/software/using/matlab/#simple-matlab-job","title":"Simple MATLAB job","text":"

Here is an example MATLAB batch script that can submitted with sbatch:

#!/bin/bash\n#SBATCH --job-name=matlab_test\n#SBATCH --output=matlab_test.\"%j\".out\n#SBATCH --error=matlab_test.\"%j\".err\n#SBATCH --partition=normal\n#SBATCH --time=00:10:00\n#SBATCH --cpus-per-task=1\n#SBATCH --mem=8G\n#SBATCH --mail-type=ALL\n\nmodule load matlab\nmatlab -nodisplay < example.m\n

This simple job, named matlab_test, will run a MATLAB script named example.m in the normal partition, for a duration of 10 minutes, using 1 CPU and 8 GB of RAM. It will send you an email (to the address you used when you signed up for Sherlock) when it begins, ends or fails.

Additionally, to aid in debugging, it will log any errors and output to the files matlab_test.JOBID.{out,err} with the jobid appended to the filename (%j).

To create the script, open a text editor on Sherlock, copy the contents of the script above, and save it as matlab_test.sbatch.

Then, submit the job with the sbatch command:

$ sbatch matlab_test.sbatch\nSubmitted batch job 59942277\n

You can check the status of the job with the squeue command, and check the contents of the matlab_test.JOBID.{out,err} files to see the results.
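
For instance, using the job id returned by sbatch above:

$ squeue -j 59942277\n$ cat matlab_test.59942277.out\n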

"},{"location":"docs/software/using/matlab/#parallel-loop","title":"Parallel loop","text":"

You can run your MATLAB code across multiple CPUs on Sherlock using parfor loops, to take advantage of the multiple CPU cores that each node features. You can submit a job requesting as many CPUs as there are on a node in a single job. The key is to grab the SLURM environment variable $SLURM_CPUS_PER_TASK and create the worker pool in your MATLAB code with:

parpool('local', str2num(getenv('SLURM_CPUS_PER_TASK')))\n

Here is an example of a sbatch submission script that requests 16 CPUs on a node, and runs a simple MATLAB script using parfor.

Save the two scripts below as parfor.sbatch and parfor_loop.m:

#!/bin/bash\n#SBATCH -J pfor_matlab\n#SBATCH -o pfor\".%j\".out\n#SBATCH -e pfor\".%j\".err\n#SBATCH -t 20:00\n#SBATCH -p normal\n#SBATCH -c 16\n#SBATCH --mail-type=ALL\n\nmodule load matlab\nmatlab -batch parfor_loop\n
%============================================================================\n% Parallel Monte Carlo calculation of PI\n%============================================================================\nparpool('local', str2num(getenv('SLURM_CPUS_PER_TASK')))\nR = 1;\ndarts = 1e7;\ncount = 0;\ntic\nparfor i = 1:darts\n   % Compute the X and Y coordinates of where the dart hit the...............\n   % square using Uniform distribution.......................................\n   x = R*rand(1);\n   y = R*rand(1);\n   if x^2 + y^2 <= R^2\n      % Increment the count of darts that fell inside of the.................\n      % circle...............................................................\n      count = count + 1; % Count is a reduction variable.\n   end\nend\n% Compute pi.................................................................\nmyPI = 4*count/darts;\nT = toc;\nfprintf('The computed value of pi is %8.7f.\\n', myPI);\nfprintf('The parallel Monte-Carlo method is executed in %8.2f seconds.\\n', T);\ndelete(gcp);\nexit;\n

You can now submit the job with the following command:

sbatch parfor.sbatch\n

If you run htop or pstree -u $USER on the compute node that is running your job, you will see all 16 cores allocated to your MATLAB code.

You can also try that same job with different numbers of CPUs, and see how well it scales.
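For instance, since options passed on the sbatch command line take precedence over the #SBATCH directives in the script, you can vary the CPU count without editing parfor.sbatch:

$ sbatch -c 4 parfor.sbatch\n$ sbatch -c 8 parfor.sbatch\n$ sbatch -c 16 parfor.sbatch\n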

  1. MATLAB uses the Java® Virtual Machine (JVM™) software to run the desktop and to display graphics. The -nojvm option enables you to start MATLAB without the JVM. Using this option minimizes memory usage and improves initial start-up speed, but restricts functionality. ↩

"},{"location":"docs/software/using/perl/","title":"Perl","text":""},{"location":"docs/software/using/perl/#introduction","title":"Introduction","text":"

Perl is a high-level, general-purpose, interpreted, dynamic programming language. Originally developed by Larry Wall in 1987 as a general-purpose Unix scripting language to make report processing easier, it has since undergone many changes and revisions.

Perl provides a framework allowing users to easily extend the language by installing new modules in their local environment. The Comprehensive Perl Archive Network (CPAN1) is an archive of over 25,000 distributions of software written in Perl, as well as documentation for it. It is searchable at http://metacpan.org or http://search.cpan.org and mirrored in over 270 locations around the world.

"},{"location":"docs/software/using/perl/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using Perl on Sherlock. For more complete documentation about Perl in general, please see the Perl documentation.

"},{"location":"docs/software/using/perl/#perl-modules-on-sherlock","title":"Perl modules on Sherlock","text":"

To install Perl modules from CPAN, we recommend using the provided App::cpanminus and local::lib modules:

  • App::cpanminus is a popular alternative CPAN client that can be used to manage Perl distributions. It has many great features, including uninstalling modules.
  • local::lib allows users to install Perl modules in the directory of their choice (typically their home directory) without administrative privileges.

Both are already installed on Sherlock, and are automatically enabled and configured when you load the perl module. You don't need to add anything to your ~/.bashrc file: the Sherlock perl module automatically sets up everything required, so you can directly run a command to install Perl modules locally.

"},{"location":"docs/software/using/perl/#installation","title":"Installation","text":"

Perl modules installation is only necessary once

You only need to install Perl modules once on Sherlock. Since filesystems are shared, modules installed on one node will immediately be available on all nodes of the cluster.

As an example, to install the DateTime::TimeZone module, you can do the following:

$ ml perl\n$ cpanm DateTime::TimeZone\n
"},{"location":"docs/software/using/perl/#usage","title":"Usage","text":"

Once installed, you can use the Perl modules directly; no specific options or syntax are required.

For instance, to check that the DateTime::TimeZone module is correctly installed:

$ perl -MDateTime::TimeZone -e 'print $DateTime::TimeZone::VERSION . \"\\n\"';\n2.13\n
"},{"location":"docs/software/using/perl/#uninstallation","title":"Uninstallation","text":"

To uninstall a Perl module:

$ cpanm -U DateTime::TimeZone\n
  1. CPAN can denote either the archive network itself, or the Perl program that acts as an interface to the network and as an automated software installer (somewhat like a package manager). Most software on CPAN is free and open source. ↩

"},{"location":"docs/software/using/postgresql/","title":"PostgreSQL","text":""},{"location":"docs/software/using/postgresql/#introduction","title":"Introduction","text":"

PostgreSQL is a powerful, open source object-relational database system with a strong focus on reliability, feature robustness, and performance.

"},{"location":"docs/software/using/postgresql/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using PostgreSQL on Sherlock. For more complete documentation about PostgreSQL in general, please see the PostgreSQL documentation.

"},{"location":"docs/software/using/postgresql/#postgresql-on-sherlock","title":"PostgreSQL on Sherlock","text":"

We don't provide any centralized database service on Sherlock, but we provide a centralized installation of PostgreSQL, and each user is welcome to start their own instance of the database server to fit their jobs' needs.

The overall process to run an instance of PostgreSQL on Sherlock would look like this:

  1. configure and initialize your environment so you can start a database instance under your user account,
  2. start the database server,
  3. run SQL queries from the same node (via a local socket), or from other nodes and/or jobs (via the network).
"},{"location":"docs/software/using/postgresql/#single-node-access","title":"Single-node access","text":"

In this example, the database server and client will run within the same job, on the same compute node.

"},{"location":"docs/software/using/postgresql/#preparation","title":"Preparation","text":"

You first need to let PostgreSQL know where to store its database. The commands below only need to be executed once.

Assuming you'll want to store your database files in a db/ directory in your $SCRATCH folder, you can run the following commands:

$ export DB_DIR=$SCRATCH/db\n$ mkdir $DB_DIR\n

Once you have your $DB_DIR in place, you need to initialize your database with some internal data that PostgreSQL needs. In the same terminal, run the following commands:

$ ml system postgresql\n$ initdb $DB_DIR\n
"},{"location":"docs/software/using/postgresql/#start-the-server","title":"Start the server","text":"

You can now start the PostgreSQL server. For this, first get an allocation on a compute node, note the hostname of the compute node your job has been allocated, load the postgresql module, and then run the PostgreSQL server:

$ srun --pty bash\n$ echo $SLURM_JOB_NODELIST\nsh-01-01\n$ ml system postgresql\n$ export DB_DIR=$SCRATCH/db\n$ postgres -D $DB_DIR\n[...]\n2018-10-09 17:42:08.094 PDT [3841] LOG:  database system is ready to accept connections\n

The postgres process is blocking, meaning it will not give the prompt back for as long as the PostgreSQL server runs.
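If you'd rather keep your prompt, pg_ctl can start the same server in the background and capture its output in a log file (a quick sketch, reusing the $DB_DIR defined above; the log file name is arbitrary):

$ pg_ctl start -D $DB_DIR -l $DB_DIR/postgres.log\n$ pg_ctl status -D $DB_DIR\n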

"},{"location":"docs/software/using/postgresql/#run-queries","title":"Run queries","text":"

You're now ready to run queries against that PostgreSQL instance, from the same node your job is running on.

From another terminal on Sherlock, connect to your job's compute node (here, it's sh-01-01, as shown above), load the postgresql module, and then run the createdb command: it will create a database that you can use as a testbed:

$ ssh sh-01-01\n$ ml system postgresql\n$ createdb test_db\n

Once this is done, from the same shell, you can run the psql command, which will open the PostgreSQL shell, ready to run your SQL queries:

$ psql test_db\npsql (10.5)\nType \"help\" for help.\n\ntest_db=#\n
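For instance, to create a table, insert a row, and read it back (a minimal illustration; the table and column names are arbitrary):

test_db=# CREATE TABLE samples (id serial PRIMARY KEY, name text);\nCREATE TABLE\ntest_db=# INSERT INTO samples (name) VALUES ('hello');\nINSERT 0 1\ntest_db=# SELECT * FROM samples;\n id | name\n----+-------\n  1 | hello\n(1 row)\n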

Once you're done with your PostgreSQL instance, you can just terminate your job, and all the processes will be terminated automatically.

"},{"location":"docs/software/using/postgresql/#multi-node-access","title":"Multi-node access","text":"

In case you need to run a more persistent instance of PostgreSQL, you can for instance submit a dedicated job to run the server, make it accessible over the network, and run queries from other jobs and/or nodes.

"},{"location":"docs/software/using/postgresql/#enable-network-access","title":"Enable network access","text":"

The preparation steps are pretty similar to the single-node case, except the PostgreSQL server instance will be accessed over the network rather than through a local socket.

Network access must be secured

When running a networked instance of PostgreSQL, please keep in mind that any user on Sherlock could potentially connect to the TCP ports that postgres listens on, so proper configuration must be done to prevent unauthorized access.

Like in the single-node case, you need to start the postgres server process, but with the -i option to enable network connections, and define user access in your $DB_DIR/pg_hba.conf file (see below).

"},{"location":"docs/software/using/postgresql/#secure-access","title":"Secure access","text":"

To allow network connections to the database server, a password needs to be defined for the PostgreSQL user. It will allow this user to connect to the PostgreSQL instance from any node. Please make sure to replace the my-secure-password string below with the actual password of your choice.

Choose a proper password

This password will only be used to access this specific instance of PostgreSQL. Note that anybody knowing that password will be allowed to connect to your PostgreSQL instance and modify data in its tables.

  • do NOT use my-secure-password
  • do NOT use your SUNet ID password

Once you've chosen your password, you can now start the PostgreSQL server on a compute node, as described in the previous section, initialize the database, and set the user password:

$ srun --pty bash\n\n$ echo $SLURM_JOB_NODELIST\nsh-01-01\n$ export DB_DIR=$SCRATCH/db\n$ mkdir $DB_DIR\n\n$ ml system postgresql\n$ initdb $DB_DIR\n$ pg_ctl start -D $DB_DIR\n$ createdb test_db\n\n$ psql -c \"ALTER USER $USER PASSWORD 'my-secure-password';\" test_db\n

Then, we need to edit the $DB_DIR/pg_hba.conf file to allow network access for user $USER:

$ cat << EOF > $DB_DIR/pg_hba.conf\nlocal   all             all                                     trust\nhost    all             all             127.0.0.1/32            trust\nhost    all             all             ::1/128                 trust\nhost    all             $USER           samenet                 md5\nEOF\n

Once you've done that, you're ready to terminate that interactive job, and start a dedicated PostgreSQL server job.

$ pg_ctl stop -D $DB_DIR\n$ logout\n
"},{"location":"docs/software/using/postgresql/#start-postgresql-in-a-job","title":"Start PostgreSQL in a job","text":"

You can use the following postgresql.sbatch job as a template:

#!/bin/bash\n\n#SBATCH --job-name=postgresql\n#SBATCH --time=8:0:0\n#SBATCH --dependency=singleton\n\nexport DB_DIR=$SCRATCH/db\n\nml system postgresql\n\npostgres -i -D $DB_DIR\n

and submit it with:

$ sbatch postgresql.sbatch\n

Concurrent instances will lead to data corruption

An important thing to keep in mind is that having multiple instances of a PostgreSQL server running at the same time, using the same database files, will certainly lead to catastrophic situations and the corruption of those files.

To prevent this from happening, the --dependency=singleton job submission option will make sure that only one instance of that job (based on its name and user) will run at any given time.

"},{"location":"docs/software/using/postgresql/#connect-to-the-running-instance","title":"Connect to the running instance","text":"

Now, from any node on Sherlock, whether from a login node, an interactive job, or a batch job, using the psql CLI or any application binding in any language, you should be able to connect to your running PostgreSQL instance.

First, identify the node your job is running on with squeue:

$ squeue -u $USER -n postgresql\n             JOBID PARTITION       NAME     USER ST       TIME  NODES NODELIST(REASON)\n          21383445    normal postgresql   kilian  R       0:07      1 sh-01-02\n

and then, point your PostgreSQL client to that node:

$ ml system postgresql\n$ psql -h sh-01-02  test_db\nPassword:\npsql (10.5)\nType \"help\" for help.\n\ntest_db=#\n

That's it! You can now run SQL queries from anywhere on Sherlock to your own PostgreSQL instance.

"},{"location":"docs/software/using/postgresql/#persistent-db-instances","title":"Persistent DB instances","text":"

SQL data is persistent

All the data you import in your SQL databases will be persistent across jobs: you can run a PostgreSQL server job for the day, import data in its database, stop the job, and resubmit the same PostgreSQL server job the next day. All your data will still be there, as long as the location you've chosen for your database (the $DB_DIR defined in the Preparation steps) is on a persistent storage location.

If you need database access for more than the maximum runtime of a job, you can use the instructions provided to define self-resubmitting recurring jobs and submit long-running database instances.

"},{"location":"docs/software/using/python/","title":"Python","text":""},{"location":"docs/software/using/python/#introduction","title":"Introduction","text":"

Python is an interpreted high-level programming language for general-purpose programming. Its design philosophy emphasizes code readability. It provides constructs that enable clear programming on both small and large scales, which makes it both easy to learn and very well-suited for rapid prototyping.

"},{"location":"docs/software/using/python/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using Python on Sherlock. For more complete documentation about Python in general, please see the Python documentation.

"},{"location":"docs/software/using/python/#python-on-sherlock","title":"Python on Sherlock","text":"

Sherlock features multiple versions of Python.

Some applications only work with legacy features of version 2.x, while more recent code will require specific version 3.x features. Modules on Sherlock may only be available in a single flavor (as denoted by their suffix: _py27 or _py36), because the application only supports one or the other.

You can load either version on Sherlock with the following commands:

$ ml python/2.7.13\n

or

$ ml python/3.6.1\n

The Python3 interpreter is python3

The Python3 executable is named python3, not python. So, once you have the \"python/3.6.1\" module loaded on Sherlock, you will need to use python3 to invoke the proper interpreter. python will still refer to the default, older system-level Python installation, and may result in errors when trying to run Python3 code.

This is an upstream decision detailed in PEP-394, not something specific to Sherlock.
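For instance, with the module loaded, you may see something like this (the exact system Python version may differ):

$ ml python/3.6.1\n$ python3 --version\nPython 3.6.1\n$ python --version\nPython 2.7.5\n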

"},{"location":"docs/software/using/python/#using-python","title":"Using Python","text":"

Once your environment is configured (i.e. when the Python module is loaded), Python can be started by simply typing python at the shell prompt:

$ python\nPython 2.7.13 (default, Apr 27 2017, 14:19:21)\n[GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] on linux2\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>>\n
"},{"location":"docs/software/using/python/#python-in-batch-jobs","title":"Python in batch jobs","text":"

Python output is buffered by default

By default, Python buffers console output. It means that when running Python in a batch job through Slurm, you may see output less often than you would when running interactively.

When output is being buffered, the print statements are aggregated until there is enough data to print, and then the messages are all printed at once. As a consequence, job output files (as specified with the --output and --error job submission options) will be refreshed less often and may give the impression that the job is not running.

For debugging or checking that a Python script is producing the correct output, you may want to switch off buffering.

"},{"location":"docs/software/using/python/#switching-off-buffering","title":"Switching off buffering","text":"

For a single python script you can use the -u option, as in python -u my_script.py. The -u option stands for \"unbuffered\".

For instance:

#!/bin/bash\n#SBATCH -n 1\n\npython -u my_script.py\n

Tip

You can also use the environment variable PYTHONUNBUFFERED to set unbuffered I/O for your whole batch script.

#!/bin/bash\n#SBATCH -n 1\n\nexport PYTHONUNBUFFERED=True\npython my_script.py\n

NB: There is some performance penalty for having unbuffered print statements, so you may want to reduce the number of print statements, or run buffered for production runs.

"},{"location":"docs/software/using/python/#python-packages","title":"Python packages","text":"

The capabilities of Python can be extended with packages developed by third parties. In general, to simplify operations, it is left up to individual users and groups to install these third-party packages in their own directories. However, Sherlock provides tools to help you install the third-party packages that you need.

Among many others, the following common Python packages are provided on Sherlock:

  • NumPy
  • SciPy

Python modules on Sherlock generally follow the naming scheme below:

py-<package_name>/version_py<python_version>\n

For instance, NumPy modules are:

  • py-numpy/1.14.3_py27
  • py-numpy/1.14.3_py36

You can list all available module versions for a package with ml spider <package_name>. For instance:

$ ml spider tensorflow\n-------------------------------------------------------------------------------\n  py-tensorflow:\n-------------------------------------------------------------------------------\n    Description:\n      TensorFlow™ is an open source software library for numerical computation using data flow graphs.\n\n     Versions:\n        py-tensorflow/1.6.0_py27\n        py-tensorflow/1.6.0_py36\n        py-tensorflow/1.7.0_py27\n        py-tensorflow/1.9.0_py27\n        py-tensorflow/1.9.0_py36\n

Dependencies are handled automatically

When you decide to use NumPy on Sherlock, you just need to load the py-numpy module of your choice, and the correct Python interpreter will be loaded automatically. No need to load a python module explicitly.
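For instance, the following should work without loading any python module explicitly (a quick check, using one of the py-numpy versions listed above):

$ ml py-numpy/1.14.3_py36\n$ python3 -c 'import numpy; print(numpy.__version__)'\n1.14.3\n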

"},{"location":"docs/software/using/python/#installing-packages","title":"Installing packages","text":"

If you need to use a Python package that is not already provided as a module on Sherlock, you can use the pip command. This command takes care of compiling and installing most Python packages and their dependencies. All of pip's commands and options are explained in detail in the Pip user guide.

A comprehensive index of Python packages can be found at PyPI.

To install Python packages with pip, you'll need to use the --user option. This will make sure that those packages are installed in a user-writable location (by default, your $HOME directory). Since your $HOME directory is shared across nodes on Sherlock, you'll only need to install your Python packages once, and they'll be ready to be used on every single node in the cluster.

For example:

$ pip install --user <package_name>\n

For Python 3, use pip3:

$ pip3 install --user <package_name>\n

Python packages will be installed in $HOME/.local/lib/python<version>/site-packages, meaning that packages for Python 2.x and Python 3.x will be kept separate. This means both that they won't interfere with each other, and that if you need to use a package with both Python 2.x and 3.x, you'll need to install it twice, once for each Python version.
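If you want to see the exact per-user installation path for the Python version you currently have loaded, you can ask Python itself (the paths shown are illustrative):

$ python -m site --user-site\n/home/users/<sunetid>/.local/lib/python2.7/site-packages\n$ python3 -m site --user-site\n/home/users/<sunetid>/.local/lib/python3.6/site-packages\n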

"},{"location":"docs/software/using/python/#list-installed-packages","title":"List installed packages","text":"

You can easily see the list of the Python packages installed in your environment, and their location, with pip list:

$ pip list -v\nPackage    Version Location                                                            Installer\n---------- ------- ------------------------------------------------------------------- ---------\npip        18.1    /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip\nsetuptools 28.8.0  /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip\nurllib3    1.24    /home/users/kilian/.local/lib/python2.7/site-packages               pip\nvirtualenv 15.1.0  /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip\n
"},{"location":"docs/software/using/python/#alternative-installation-path","title":"Alternative installation path","text":"

Python paths

While theoretically possible, installing Python packages in alternate locations can be tricky, so we recommend sticking to the pip install --user approach as often as possible. But in case you absolutely need it, we provide some guidelines below.

One common case of needing to install Python packages in alternate locations is to share those packages with a group of users. Here's an example that will show how to install the urllib3 Python package in a group-shared location and let users from the group use it without having to install it themselves.

First, you need to create a directory to store those packages. We'll put it in $GROUP_HOME:

$ mkdir -p $GROUP_HOME/python/\n

Then, we load the Python module we need, and we instruct pip to install its packages in the directory we just created:

$ ml python/2.7.13\n$ PYTHONUSERBASE=$GROUP_HOME/python pip install --user urllib3\n

We still use the --user option, but with PYTHONUSERBASE pointing to a different directory, pip will install packages there.

Now, to be able to use that Python module, since it hasn't been installed in a default directory, you (and all the members of the group who will want to use that module) need to set your PYTHONPATH to include our new shared directory1:

$ export PYTHONPATH=$GROUP_HOME/python/lib/python2.7/site-packages:$PYTHONPATH\n

And now, the module should be visible:

$ pip list -v\nPackage    Version Location                                                            Installer\n---------- ------- ------------------------------------------------------------------- ---------\npip        18.1    /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip\nsetuptools 28.8.0  /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip\nurllib3    1.24    /home/groups/ruthm/python/lib/python2.7/site-packages               pip\nvirtualenv 15.1.0  /share/software/user/open/python/2.7.13/lib/python2.7/site-packages pip\n

$PYTHONPATH depends on the Python version

The $PYTHONPATH environment variable is dependent on the Python version you're using, so for Python 3.6, it should include $GROUP_HOME/python/lib/python3.6/site-packages

$PATH may also need to be updated

Some Python packages also install executable scripts. To make them easily accessible in your environment, you may also want to modify your $PATH to include their installation directory.

For instance, if you installed Python packages in $GROUP_HOME/python:

$ export PATH=$GROUP_HOME/python/bin:$PATH\n

"},{"location":"docs/software/using/python/#installing-from-github","title":"Installing from GitHub","text":"

pip also supports installing packages from a variety of sources, including GitHub repositories.

For instance, to install HTTPie, you can do:

$ pip install --user git+https://github.com/jkbr/httpie.git\n
"},{"location":"docs/software/using/python/#installing-from-a-requirements-file","title":"Installing from a requirements file","text":"

pip allows installing a list of packages from a file, which can be pretty convenient to install several dependencies at once.

In order to do this, create a text file called requirements.txt and place each package you would like to install on its own line:

requirements.txt
numpy\nscikit-learn\nkeras\ntensorflow\n

You can now install your modules like so:

$ ml python\n$ pip install --user -r requirements.txt\n
"},{"location":"docs/software/using/python/#upgrading-packages","title":"Upgrading packages","text":"

pip can update already installed packages with the following command:

$ pip install --user --upgrade <package_name>\n

Upgrading packages also works with requirements.txt files:

$ pip install --user --upgrade -r requirements.txt\n
"},{"location":"docs/software/using/python/#uninstalling-packages","title":"Uninstalling packages","text":"

To uninstall a Python package, you can use the pip uninstall command (note that it doesn't take any --user option):

$ pip uninstall <package_name>\n$ pip uninstall -r requirements.txt\n
"},{"location":"docs/software/using/python/#virtual-environments","title":"Virtual environments","text":"

Work in progress

This page is a work in progress and is not complete yet. We are actively working on adding more content and information.
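In the meantime, the standard library venv module should work with the Python 3 modules on Sherlock; here's a minimal sketch (the environment path and package are arbitrary examples):

$ ml python/3.6.1\n$ python3 -m venv $HOME/venvs/myproject\n$ source $HOME/venvs/myproject/bin/activate\n(myproject) $ pip install numpy\n(myproject) $ deactivate\n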

  1. This line can also be added to a user's ~/.profile file, for a more permanent setting. ↩

"},{"location":"docs/software/using/quantum-espresso/","title":"Quantum Espresso","text":""},{"location":"docs/software/using/quantum-espresso/#introduction","title":"Introduction","text":"

Quantum ESPRESSO is an integrated suite of Open-Source computer codes for electronic-structure calculations and materials modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudo-potentials.

Quantum ESPRESSO has evolved into a distribution of independent and inter-operable codes in the spirit of an open-source project. The Quantum ESPRESSO distribution consists of a “historical” core set of components, and a set of plug-ins that perform more advanced tasks, plus a number of third-party packages designed to be inter-operable with the core components. Researchers active in the field of electronic-structure calculations are encouraged to participate in the project by contributing their own codes or by implementing their own ideas into existing codes.

"},{"location":"docs/software/using/quantum-espresso/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using Quantum Espresso on Sherlock. For more complete documentation about Quantum Espresso in general, please see the Quantum Espresso documentation.

"},{"location":"docs/software/using/quantum-espresso/#quantum-espresso-on-sherlock","title":"Quantum Espresso on Sherlock","text":"

To run Quantum Espresso on Sherlock, you can use one of the provided modules, or run it from a container.

The CPU version of Quantum Espresso can be loaded via the quantum-espresso module:

$ ml chemistry quantum-espresso\n

and the GPU version can be loaded via the quantum-espresso_gpu module:

$ ml chemistry quantum-espresso_gpu\n
"},{"location":"docs/software/using/quantum-espresso/#examples","title":"Examples","text":"

Here are a few examples showing how to run the AUSURF112 benchmark.

"},{"location":"docs/software/using/quantum-espresso/#preparation","title":"Preparation","text":"

The first step is to get the benchmark files:

$ cd $SCRATCH\n$ git clone https://github.com/QEF/benchmarks qe_benchmarks\n$ cd qe_benchmarks/AUSURF112\n
"},{"location":"docs/software/using/quantum-espresso/#cpu-version","title":"CPU version","text":"

To submit a Quantum Espresso job to run the AUSURF112 benchmark on CPU nodes, the following submission script can be used:

qe-bench_cpu.sbatch
#!/bin/bash\n#SBATCH --nodes=2                # number of nodes for the job\n#SBATCH --ntasks-per-node=16     # number of tasks per node\n#SBATCH --time=00:30:00          # total run time limit (HH:MM:SS)\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n\nmodule reset\nmodule load chemistry\nmodule load quantum-espresso/7.0\n\ncd $SCRATCH/qe_benchmarks\ncd AUSURF112\n\nsrun pw.x -input ausurf.in -npool 2\n

In this example, the job will request 32 CPU cores on 2 nodes, 30 minutes of run time, and will send an email notification when the job starts and when it ends.

The job can be submitted with:

$ sbatch qe-bench_cpu.sbatch\n
"},{"location":"docs/software/using/quantum-espresso/#gpu-version","title":"GPU version","text":""},{"location":"docs/software/using/quantum-espresso/#native","title":"Native","text":"

The GPU version can be loaded through the quantum-espresso_gpu module.

Using the same benchmark files as for the CPU version above, you can create a job submissions script like this:

qe-bench_gpu.sbatch
#!/bin/bash\n#SBATCH --partition=gpu          # partition to submit the job to\n#SBATCH --nodes=2                # number of nodes for the job\n#SBATCH --gpus-per-node=1        # number of GPUs per node\n#SBATCH --time=00:30:00          # total run time limit (HH:MM:SS)\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n\nmodule reset\nmodule load chemistry\nmodule load quantum-espresso_gpu/7.0\n\ncd $SCRATCH/qe_benchmarks\ncd AUSURF112\n\nsrun pw.x -input ausurf.in -npool 2\n

In this example, the job will request 2 GPUs (1 per node, on 2 nodes), 30 minutes of run time, and will send an email notification when the job starts and when it ends.

It can be submitted with:

$ sbatch qe-bench_gpu.sbatch\n
"},{"location":"docs/software/using/quantum-espresso/#ngc-container","title":"NGC container","text":"

Another option to run a GPU version of Quantum Espresso is to use a 3rd-party container.

The NVIDIA GPU Cloud (NGC) hosts a Quantum Espresso container that can be used on Sherlock.

"},{"location":"docs/software/using/quantum-espresso/#with-singularity","title":"With Singularity","text":"

To use the container with Singularity, first pull the Quantum Espresso container with:

$ cd $SCRATCH\n$ singularity pull docker://nvcr.io/hpc/quantum_espresso:qe-7.0\n

Then create the following script:

qe-bench_gpu_singularity.sbatch
#!/bin/bash\n#SBATCH --partition=gpu          # partition to submit the job to\n#SBATCH --nodes=2                # number of nodes for the job\n#SBATCH --gpus-per-node=1        # number of GPUs per node\n#SBATCH --mem=32GB               # memory per node\n#SBATCH --time=00:30:00          # total run time limit (HH:MM:SS)\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n\ncd $SCRATCH/qe_benchmarks\ncd AUSURF112\n\nsrun singularity run --nv \\\n    $SCRATCH/quantum_espresso_qe-7.0.sif \\\n    pw.x -input ausurf.in -npool 2\n

and submit it:

$ sbatch qe-bench_gpu_singularity.sbatch\n
"},{"location":"docs/software/using/quantum-espresso/#with-pyxisenroot","title":"With pyxis/enroot","text":"

To use the container with pyxis/enroot, you can directly submit the following script:

qe-bench_gpu_enroot.sbatch
#!/bin/bash\n#SBATCH --partition=gpu          # partition to submit the job to\n#SBATCH --nodes=2                # number of nodes for the job\n#SBATCH --gpus-per-node=1        # number of GPUs per node\n#SBATCH --mem=32GB               # memory per node\n#SBATCH --time=00:30:00          # total run time limit (HH:MM:SS)\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n\ncd $SCRATCH/qe_benchmarks\ncd AUSURF112\n\nsrun --container-image nvcr.io/hpc/quantum_espresso:qe-7.0 \\\n     --container-workdir $PWD \\\n     pw.x -input ausurf.in -npool 2\n

and submit it:

$ sbatch qe-bench_gpu_enroot.sbatch\n
"},{"location":"docs/software/using/rclone/","title":"Rclone","text":""},{"location":"docs/software/using/rclone/#introduction","title":"Introduction","text":"

If you need to sync files between cloud storage and Sherlock, rclone is a command line program that can help. You can easily use it to transfer files from a cloud storage provider to Sherlock or Oak, or vice versa. The following tutorial walks through transferring files between Google Drive and Oak storage.

"},{"location":"docs/software/using/rclone/#more-documentation","title":"More documentation","text":"

For more information on running rclone, please see the official documentation.

"},{"location":"docs/software/using/rclone/#setup","title":"Setup","text":""},{"location":"docs/software/using/rclone/#rclone-config","title":"rclone config","text":"

Before transferring data for the first time, you will need to configure rclone so that it can access your Google Drive. This will require use of your browser, so you will need to connect to Sherlock with local port forwarding (ssh -L). You only need to do this when you are configuring rclone for the first time.

Use local terminal for rclone config

This method will not work in the Sherlock OnDemand shell. You will need to use your local machine's terminal to enable local port forwarding and to allow rclone to communicate with your browser. On Linux and macOS, you can use the Terminal app; on Windows, you can use the PowerShell app.

When running rclone config you will be prompted to enter names and values, indicated by the > symbol. To leave it empty, press Enter.

# Connect to Sherlock with local port fowarding\n$ ssh -L localhost:53682:localhost:53682 <SUNetID>@login.sherlock.stanford.edu\n\n\n# Load the rclone module\n$ ml system rclone\n\n\n# Run the rclone configuration tool\n$ rclone config\n\nNo remotes found, make a new one?\nn) New remote\ns) Set configuration password\nq) Quit config\nn/s/q> n\n\nEnter name for new remote.\nname> gdrive\n\nOption Storage.\nType of storage to configure.\nChoose a number from below, or type in your own value.\n 1 / 1Fichier\n   \\ (fichier)\n 2 / Akamai NetStorage\n   \\ (netstorage)\n       ...\n18 / Google Drive\n   \\ (drive)\n       ...\n48 / premiumize.me\n   \\ (premiumizeme)\n49 / seafile\n   \\ (seafile)\nStorage> drive\n\nOption client_id.\nGoogle Application Client Id\n...\nEnter a value. Press Enter to leave empty.\nclient_id>\n\nOption client_secret.\nOAuth Client Secret.\nLeave blank normally.\nEnter a value. Press Enter to leave empty.\nclient_secret>\n\nOption scope.\nScope that rclone should use when requesting access from drive.\nChoose a number from below, or type in your own value.\nPress Enter to leave empty.\n 1 / Full access all files, excluding Application Data Folder.\n   \\ (drive)\n...\nscope> 1\n\nOption service_account_file.\nService Account Credentials JSON file path.\nLeave blank normally.\n...\nEnter a value. Press Enter to leave empty.\nservice_account_file>\n\nEdit advanced config?\ny) Yes\nn) No (default)\ny/n> n\n\nUse auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n\ny) Yes (default)\nn) No\ny/n> y\n\n2023/09/12 10:51:55 NOTICE: If your browser doesn't open automatically go to the\nfollowing link: http://127.0.0.1:53682/auth?state=#################\n2023/09/12 10:51:55 NOTICE: Log in and authorize rclone for access\n2023/09/12 10:51:55 NOTICE: Waiting for code...\n

At this point, you can copy and paste the provided link into your browser. You will be asked to confirm that you want to allow rclone to access your files. Once you have successfully done so, you can complete the configuration in the terminal.

Configure this as a Shared Drive (Team Drive)?\n\ny) Yes\nn) No (default)\ny/n> n\n\nConfiguration complete.\nOptions:\n...\nKeep this \"gdrive\" remote?\ny) Yes this is OK (default)\ne) Edit this remote\nd) Delete this remote\ny/e/d> y\n\nCurrent remotes:\n\nName                 Type\n====                 ====\ngdrive               drive\n\ne) Edit existing remote\nn) New remote\nd) Delete remote\nr) Rename remote\nc) Copy remote\ns) Set configuration password\nq) Quit config\ne/n/d/r/c/s/q> q\n
"},{"location":"docs/software/using/rclone/#examples","title":"Examples","text":""},{"location":"docs/software/using/rclone/#rclone-copy","title":"rclone copy","text":"

To transfer data between cloud storage and Sherlock or Oak, you can use the rclone copy command.

# Start an interactive dev session\n$ sh_dev\n\n# Load the rclone module\n$ ml system rclone\n\n# Copy a folder from Google Drive to Oak\n$ rclone copy gdrive:<folder name> /oak/stanford/groups/<group name>/<folder name>\n\n# Copy a single file from Oak to Google Drive\n$ rclone copy /oak/stanford/groups/<group name>/<file name> gdrive:\n
"},{"location":"docs/software/using/rclone/#rclone-lslsd","title":"rclone ls/lsd","text":"

To view the files and folders in your cloud storage, you can use the rclone ls and rclone lsd commands, respectively.

# Load the rclone module\n$ ml system rclone\n\n# List all top-level directories in Google Drive\n$ rclone lsd gdrive: --max-depth 1\n\n# List all files in a directory\n$ rclone ls gdrive:<folder name>\n\n# List all files on Google Drive (including those in folders)\n$ rclone ls gdrive:\n
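rclone also provides a sync command, which makes the destination identical to the source. Since sync can delete files on the destination side, it's a good idea to preview the operation with --dry-run first (the paths below are placeholders):

# Preview the changes without applying them\n$ rclone sync gdrive:<folder name> $SCRATCH/<folder name> --dry-run\n\n# Run the actual synchronization\n$ rclone sync gdrive:<folder name> $SCRATCH/<folder name>\n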
"},{"location":"docs/software/using/schrodinger/","title":"Schr\u00f6dinger","text":""},{"location":"docs/software/using/schrodinger/#introduction","title":"Introduction","text":"

The Schrödinger suite is a commercial, licensed software package used to simulate and model molecular behavior at the atomic level. The Schrödinger software tools include molecular dynamics simulations, quantum mechanics calculations, virtual screening and visualization tools.

"},{"location":"docs/software/using/schrodinger/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using Schrödinger on Sherlock. For more complete documentation about Schrödinger in general, please contact Schrödinger support.

"},{"location":"docs/software/using/schrodinger/#schrodinger-on-sherlock","title":"Schr\u00f6dinger on Sherlock","text":""},{"location":"docs/software/using/schrodinger/#licensing","title":"Licensing","text":"

Stanford Libraries have purchased a site license for the Schrödinger suite. Please contact Stanford Libraries at sciencelibrary@stanford.edu and CC srcc-support@stanford.edu if you would like to access Schrödinger on Sherlock: after we receive confirmation, your PI group will be granted access on Sherlock.

"},{"location":"docs/software/using/schrodinger/#using-schrodinger","title":"Using Schr\u00f6dinger","text":"

You can use the Schrödinger software after loading the corresponding software module with the module command. To load the current default version:

module load chemistry schrodinger\n

To see all the available versions, you can use the module spider command:

$ module spider schrodinger\n

Once loaded, the $SCHRODINGER environment variable is automatically set to allow all Schrödinger commands to run. For example, to run the jaguar command:

$ jaguar run -WAIT H2O.in\n

To call the basic Schr\u00f6dinger run command, just enter:

$ run\n

or glide:

$ glide\nusage: glide_startup.py [options] <input_file>\nglide_startup.py: error: the following arguments are required: input_file\n
"},{"location":"docs/software/using/schrodinger/#maestro-gui","title":"Maestro GUI","text":"

OnDemand shell sessions

Opening an X11/GUI session will not work in a Sherlock OnDemand terminal session. You will need to use the method mentioned below, i.e. a standard terminal session with an X11 client.

To launch the Maestro GUI, once you have loaded the Schrödinger module, simply run:

$ maestro\n

You'll need to enable X11 forwarding in your initial connection to Sherlock, and request it as well for your job allocation.

Here are some example commands you can run:

# on your local machine\n$ ssh -X login.sherlock.stanford.edu\n\n# then from a Sherlock login node\n$ sh_dev -m 16GB\n\n# and finally on the allocated compute node:\n$ ml load chemistry schrodinger\n$ maestro\n

This will launch Maestro on a compute node and display its graphical user interface on your local machine's display.

GUI performance

Please note that running graphical user interfaces (GUIs) over the network via X11 over SSH may not necessarily yield the best performance. Graphical analysis is often best done on a local machine, while intensive, batch scheduled computations are carried over on the cluster.

For more information about X11 forwarding, you can refer to this page.

"},{"location":"docs/software/using/schrodinger/#examples","title":"Examples","text":""},{"location":"docs/software/using/schrodinger/#batch-job-submission","title":"batch job submission","text":"

Here's an example batch script, requesting 1 CPU, for 10 minutes on the normal partition, that can be saved as water.sbatch:

#!/usr/bin/bash\n#SBATCH -o water.%j.out\n#SBATCH -e water.%j.err\n#SBATCH -n 1\n#SBATCH -t 10:00\n#SBATCH -p normal\n\n# Load required modules\nmodule load chemistry schrodinger\n\n# Run Schrödinger, -WAIT is often required\njaguar run -WAIT H2O.in\n

Save this input file as H2O.in:

&gen\n&\n&echo\n&\n&zmat\nO       0.0000000000000   0.0000000000000  -0.1135016000000\nH1      0.0000000000000   0.7531080000000   0.4540064000000\nH2      0.0000000000000  -0.7531080000000   0.4540064000000\n&\n

And you can submit the batch script with:

$ sbatch water.sbatch\n

After execution, you should find a H2O.out output file in the current directory, as well as a log file (H2O.log). If you don't, you can check for errors in the job output and error files: water.<jobid>.{out,err}.

"},{"location":"docs/software/using/spark/","title":"Spark","text":""},{"location":"docs/software/using/spark/#introduction","title":"Introduction","text":"

Apache Spark™ is a general engine for large-scale data processing. This document gives a quick introduction to getting a first test program in Spark running on Sherlock.

"},{"location":"docs/software/using/spark/#more-documentation","title":"More documentation","text":"

The following documentation is specifically intended for using Spark on Sherlock. For more complete documentation about Spark in general, please see the Apache Spark documentation.

"},{"location":"docs/software/using/spark/#spark-on-sherlock","title":"Spark on Sherlock","text":"

Running Apache Spark on Sherlock is a bit different from using a traditional Spark/Hadoop cluster, in that it requires some level of integration with the scheduler. In a sense, the computing resources (memory and CPU) need to be allocated twice: first, sufficient resources for the Spark application need to be allocated via Slurm; and second, spark-submit resource allocation flags need to be properly specified.

In order to use Spark, three steps have to be kept in mind when submitting a job to the queuing system:

  1. a new Spark cluster has to be started on the allocated nodes
  2. once the Spark cluster is up and running, Spark jobs have to be submitted to the cluster
  3. after all Spark jobs have finished running, the cluster has to be shut down

The following scripts show how to implement these three steps, and use the Pi Monte-Carlo calculation as an example.

"},{"location":"docs/software/using/spark/#single-node-job","title":"Single-node job","text":"

In this example, all the Spark processes run on the same compute node, which makes for a fairly simple sbatch script. The following example will start an 8-core job on a single node, and run a Spark task within that allocation:

#!/bin/bash\n\n#SBATCH --job-name=spark_singlenode\n#SBATCH --nodes=1\n#SBATCH --cpus-per-task=8\n#SBATCH --time=10\n\nmodule load spark\n\n# This syntax tells spark to use all cpu cores on the node.\nexport MASTER=\"local[*]\"\n\n# This is a Scala example\nrun-example SparkPi 1000\n\n# This is a Python example.\nspark-submit --master $MASTER $SPARK_HOME/examples/src/main/python/pi.py 1000\n
"},{"location":"docs/software/using/spark/#multi-node-job","title":"Multi-node job","text":"

To start a Spark cluster and run a task on multiple nodes, more preliminary steps are necessary. Here's an example script that will span 2 nodes, start 2 Spark workers on each node, and allow each worker to use 8 cores:

#!/bin/bash\n#SBATCH --nodes=2\n#SBATCH --mem-per-cpu=4G\n#SBATCH --cpus-per-task=8\n#SBATCH --ntasks-per-node=2\n#SBATCH --output=sparkjob-%j.out\n\n## --------------------------------------\n## 0. Preparation\n## --------------------------------------\n\n# load the Spark module\nmodule load spark\n\n# identify the Spark cluster with the Slurm jobid\nexport SPARK_IDENT_STRING=$SLURM_JOBID\n\n# prepare directories\nexport SPARK_WORKER_DIR=${SPARK_WORKER_DIR:-$HOME/.spark/worker}\nexport SPARK_LOG_DIR=${SPARK_LOG_DIR:-$HOME/.spark/logs}\nexport SPARK_LOCAL_DIRS=${SPARK_LOCAL_DIRS:-/tmp/spark}\nmkdir -p $SPARK_LOG_DIR $SPARK_WORKER_DIR\n\n## --------------------------------------\n## 1. Start the Spark cluster master\n## --------------------------------------\n\nstart-master.sh\nsleep 1\nMASTER_URL=$(grep -Po '(?=spark://).*' \\\n             $SPARK_LOG_DIR/spark-${SPARK_IDENT_STRING}-org.*master*.out)\n\n## --------------------------------------\n## 2. Start the Spark cluster workers\n## --------------------------------------\n\n# get the resource details from the Slurm job\nexport SPARK_WORKER_CORES=${SLURM_CPUS_PER_TASK:-1}\nexport SPARK_MEM=$(( ${SLURM_MEM_PER_CPU:-4096} * ${SLURM_CPUS_PER_TASK:-1} ))M\nexport SPARK_DAEMON_MEMORY=$SPARK_MEM\nexport SPARK_WORKER_MEMORY=$SPARK_MEM\nexport SPARK_EXECUTOR_MEMORY=$SPARK_MEM\n\n# start the workers on each node allocated to the job\nexport SPARK_NO_DAEMONIZE=1\nsrun  --output=$SPARK_LOG_DIR/spark-%j-workers.out --label \\\n      start-slave.sh ${MASTER_URL} &\n\n## --------------------------------------\n## 3. Submit a task to the Spark cluster\n## --------------------------------------\n\nspark-submit --master ${MASTER_URL} \\\n             --total-executor-cores $((SLURM_NTASKS * SLURM_CPUS_PER_TASK)) \\\n             $SPARK_HOME/examples/src/main/python/pi.py 10000\n\n## --------------------------------------\n## 4. Clean up\n## --------------------------------------\n\n# stop the workers\nscancel ${SLURM_JOBID}.0\n\n# stop the master\nstop-master.sh\n
"},{"location":"docs/storage/","title":"Storage on Sherlock","text":"

Sherlock provides access to several file systems, each with distinct storage characteristics. Each user and PI group get access to a set of predefined directories in these file systems to store their data.

Sherlock is a compute cluster, not a storage system

Sherlock's storage resources are limited and are shared among many users. They are meant to store data and code associated with projects for which you are using Sherlock's computational resources. This space is for work actively being computed on with Sherlock, and should not be used as a target for backups from other systems.

If you're looking for a long-term storage solution for research data, Stanford Research Computing offers the Oak storage system, which is specifically intended for this usage.

Those file systems are shared with other users, and are subject to quota limits and, for some of them, purge policies (time-residency limits).

"},{"location":"docs/storage/#filesystem-overview","title":"Filesystem overview","text":""},{"location":"docs/storage/#features-and-purpose","title":"Features and purpose","text":"Name Type Backups / Snapshots Performance Purpose Cost $HOME, $GROUP_HOME NFS / low small, important files (source code, executable files, configuration files...) free $SCRATCH, $GROUP_SCRATCH Lustre / high bandwidth large, temporary files (checkpoints, raw application output...) free $L_SCRATCH local SSD / low latency, high IOPS job specific output requiring high IOPS free $OAK Lustre option / moderate long term storage of research data volume-based1"},{"location":"docs/storage/#access-scope","title":"Access scope","text":"Name Scope Access sharing level $HOME cluster user $GROUP_HOME cluster group $SCRATCH cluster user $GROUP_SCRATCH cluster group $L_SCRATCH compute node user $OAK cluster (optional, purchase required) group

Group storage locations are typically shared between all the members of the same PI group. User locations are only accessible by the user.

"},{"location":"docs/storage/#quotas-and-limits","title":"Quotas and limits","text":"

Volume and inodes

Quotas are applied on both volume (the amount of data stored, in bytes) and inodes: an inode (index node) is a data structure in a Unix-style file system that describes a file-system object such as a file or a directory. In practice, each filesystem entry (file, directory, link) counts as an inode.
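Since every file, directory or link counts as one inode, you can estimate the inode usage of a directory tree by counting its entries, for instance (the number shown is just an example):

$ find $SCRATCH | wc -l\n143808\n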

| Name | Quota type | Volume quota | Inode quota | Retention |
| --- | --- | --- | --- | --- |
| $HOME | directory | 15 GB | n/a | ∞ |
| $GROUP_HOME | directory | 1 TB | n/a | ∞ |
| $SCRATCH | directory | 100 TB | 20 million | time limited |
| $GROUP_SCRATCH | directory | 100 TB | 20 million | time limited |
| $L_SCRATCH | n/a | n/a | n/a | job lifetime |
| $OAK | directory | amount purchased | function of the volume purchased | ∞ |

Quota types:

  • directory: based on files location and account for all the files that are in a given directory.
  • user: based on files ownership and account for all the files that belong to a given user.
  • group: based on files ownership and account for all the files that belong to a given group.

Retention types:

  • ∞: files are kept as long as the user account exists on Sherlock.
  • time limited: files are kept for a fixed length of time after they've been last modified. Once the limit is reached, files expire and are automatically deleted.
  • job lifetime: files are only kept for the duration of the job and are automatically purged when the job ends.

Global fail-safe user and quota groups on /scratch

To prevent potential issues which would result in the file system filling up completely and making it unusable for everyone, additional user and group-level quotas are in place on the /scratch file system, as a fail-safe:

  • a user will not be able to use more than 250 TB (50M inodes) in total, in all the /scratch directories they have access to.

  • a group will not be able to use more than 1 PB (200M inodes) in total across all the /scratch directories its group members have access to.

"},{"location":"docs/storage/#checking-quotas","title":"Checking quotas","text":"

To check your quota usage on the different filesystems you have access to, you can use the sh_quota command:

$ sh_quota\n+---------------------------------------------------------------------------+\n| Disk usage for user kilian (group: ruthm)                                 |\n+---------------------------------------------------------------------------+\n|   Filesystem |  volume /   limit                  | inodes /  limit       |\n+---------------------------------------------------------------------------+\n          HOME |   9.4GB /  15.0GB [||||||     62%] |      - /      - (  -%)\n    GROUP_HOME | 562.6GB /   1.0TB [|||||      56%] |      - /      - (  -%)\n       SCRATCH |  65.0GB / 100.0TB [            0%] | 143.8K /  20.0M (  0%)\n GROUP_SCRATCH | 172.2GB / 100.0TB [            0%] |  53.4K /  20.0M (  0%)\n           OAK |  30.8TB / 240.0TB [|          12%] |   6.6M /  36.0M ( 18%)\n+---------------------------------------------------------------------------+\n

Several options are provided to allow listing quotas for a specific filesystem only, or in the context of a different group (for users who are members of several PI groups). Please see the sh_quota usage information for details:

$ sh_quota -h\nsh_quota: display user and group quota information for all accessible filesystems.\n\nUsage: sh_quota [OPTIONS]\n    Optional arguments:\n        -f FILESYSTEM   only display quota information for FILESYSTEM.\n                        For instance: \"-f $HOME\"\n        -g GROUP        for users with multiple group memberships, display\n                        group quotas in the context of that group\n        -n              don't display headers\n        -j              JSON output (implies -n)\n
"},{"location":"docs/storage/#examples","title":"Examples","text":"

For instance, to only display your quota usage on $HOME:

$ sh_quota -f HOME\n

If you belong to multiple groups, you can display the group quotas for your secondary groups with:

$ sh_quota -g <group_name>\n

And finally, for great output control, an option to display quota usage in JSON is provided via the -j option:

$ sh_quota -f SCRATCH -j\n{\n  \"SCRATCH\": {\n    \"quotas\": {\n      \"type\": \"user\",\n      \"blocks\": {\n        \"usage\": \"47476660\",\n        \"limit\": \"21474836480\"\n      },\n      \"inodes\": {\n        \"usage\": \"97794\",\n        \"limit\": \"20000000\"\n      }\n    }\n  }\n}\n
"},{"location":"docs/storage/#locating-large-directories","title":"Locating large directories","text":"

It's not always easy to identify the files and directories that take up the most space when getting close to the quota limits. Some tools can help with that.

  • du can be used to display the volume used by files and directories, in a given folder:

    $ cd mydir/\n$ du --human-readable --summarize  *\n101M    dir\n2.0M    file\n

    Note

    du will ignore hidden entries (everything that starts with a dot (.)). So when using it in your $HOME directory, it will skip things like .cache or .conda, which can contain significant volumes.

  • ncdu is an interactive disk usage analyzer, that generates visual representation of the volume (and inode count) for directories. To run it, you need to load the ncdu module, and then run it on your directory of choice:

    $ ml system ncdu\n$ ncdu $HOME\n

    For very large directories, running ncdu in an interactive shell on a compute node is recommended, via sh_dev.

You'll then be presented with an interactive file browser, showing information about the volume used by your directories, which should make it easy to pinpoint where most of the space is used.

Info

Note that any tool you use to view directory contents will only be able to show files that your user account has read access to. So on group-shared spaces, if you see a major difference between the totals from a tool like ncdu and the information reported by sh_quota, that can be an indicator that one of your group members has restricted permissions on a large number of items in your space.

"},{"location":"docs/storage/#where-should-i-store-my-files","title":"Where should I store my files?","text":"

Not all filesystems are equivalent

Choosing the appropriate storage location for your files is an essential step towards making your utilization of the cluster the most efficient possible. It will make your own experience much smoother, yield better performance for your jobs and simulations, and contribute to make Sherlock a useful and well-functioning resource for everyone.

Here is where we recommend storing different types of files and data on Sherlock:

  • personal scripts, configuration files and software installations \u2192 $HOME
  • group-shared scripts, software installations and medium-sized datasets \u2192 $GROUP_HOME
  • temporary output of jobs, large checkpoint files \u2192 $SCRATCH
  • curated output of job campaigns, large group-shared datasets, archives \u2192 $OAK
"},{"location":"docs/storage/#accessing-filesystems","title":"Accessing filesystems","text":""},{"location":"docs/storage/#on-sherlock","title":"On Sherlock","text":"

Filesystem environment variables

To facilitate access and data management, user and group storage location on Sherlock are identified by a set of environment variables, such as $HOME or $SCRATCH.

We strongly recommend using those variables in your scripts rather than explicit paths, to facilitate the transition to new systems, for instance. By using those environment variables, you'll be sure that your scripts will continue to work even if the underlying filesystem paths change.

To see the contents of these variables, you can use the echo command. For instance, to see the absolute path of your $SCRATCH directory:

$ echo $SCRATCH\n/scratch/users/kilian\n

Or for instance, to move to your group-shared home directory:

$ cd $GROUP_HOME\n
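For example, here is a minimal sketch of a batch script built on those variables rather than on hard-coded paths (the my_app program, the datasets directory and the input.dat file are placeholders):

#!/bin/bash\n#SBATCH --job-name=staging_example\n\n### work from your personal scratch space, via its environment variable\ncd $SCRATCH\n\n### stage shared input data, without hard-coding any filesystem path\ncp $GROUP_HOME/datasets/input.dat .\n\n### compute, then keep the curated results in $HOME\n$HOME/my_app input.dat > results.txt\ncp results.txt $HOME/\n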
"},{"location":"docs/storage/#from-other-systems","title":"From other systems","text":"

External filesystems cannot be mounted on Sherlock

For a variety of security, manageability and technical considerations, we can't mount external filesystems or data storage systems on Sherlock. The recommended approach is to make Sherlock's data available on external systems.

You can mount any of your Sherlock directories on any external system you have access to by using SSHFS. For more details, please refer to the Data Transfer page.

  1. For more information about Oak, its characteristics and cost model, please see the Oak Service Description page.\u00a0\u21a9

"},{"location":"docs/storage/data-protection/","title":"Data protection","text":"

Data protection is mostly a task for the user

Except for $HOME and $GROUP_HOME, data on Sherlock is not backed up, nor archived. It's up to each user and group to make sure they maintain multiple copies of their data if needed.

"},{"location":"docs/storage/data-protection/#snapshots","title":"Snapshots","text":"

File system snapshots represent the state of the file system at a particular point in time. They allow accessing the file system's contents as it was at different times in the past, and getting back data that may have been deleted or modified since the snapshot was taken.

Important

Snapshots are only available on $HOME and $GROUP_HOME.

"},{"location":"docs/storage/data-protection/#accessing-snapshots","title":"Accessing snapshots","text":"

Snapshots taken in $HOME and $GROUP_HOME are accessible in a .snapshot directory at any level of the hierarchy. Those .snapshot directories don't appear when listing directory contents with ls, but they can be listed explicitly or accessed with cd:

$ cd $HOME\n$ ls -ald .snapshot/users*\n[...]\ndrwx------ 118 sunetid group  6680 Jul 21 11:16 .snapshot/users.daily.20170721\ndrwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.daily.20170722\ndrwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.daily.20170723\ndrwx------ 118 sunetid group  6702 Jul 24 10:57 .snapshot/users.daily.20170724\ndrwx------ 118 sunetid group  6702 Jul 24 10:57 .snapshot/users.daily.latest\ndrwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-16:00\ndrwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-17:00\ndrwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-18:00\ndrwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-19:00\ndrwx------ 118 sunetid group  6702 Jul 21 16:19 .snapshot/users.hourly.20170722-20:00\n[...]\n$ cd .snapshot/users.daily.latest\n

For instance:

  • the $HOME/.snapshot/users.daily.latest directory is the latest daily snapshot available, and stores the contents of the $HOME directory as they were when the last daily snapshot was taken,
  • the $HOME/foo/.snapshot/users.hourly.20170722-18:00 directory can be used to retrieve the contents of the $HOME/foo directory as it was at 6pm on July 22nd, 2017.
"},{"location":"docs/storage/data-protection/#restoring-from-a-snapshot","title":"Restoring from a snapshot","text":"

If you deleted a file or modified it and want to restore an earlier version, you can simply copy the file from its saved version in the appropriate snapshot.

Examples:

  • to restore the last known version of $HOME/foo/bar:

    $ cp $HOME/foo/.snapshot/users.hourly.latest/bar $HOME/foo/bar\n

    or

    $ cp $HOME/.snapshot/users.hourly.latest/foo/bar $HOME/foo/bar\n

    (both commands are equivalent)

  • to restore your ~/.bashrc file from 2 days ago:

    $ SNAP_DATE=$(date +%Y%m%d -d \"2 days ago\")\n$ cp $HOME/.snapshot/users.daily.${SNAP_DATE}/.bashrc $HOME/.bashrc\n
"},{"location":"docs/storage/data-protection/#snapshot-policy","title":"Snapshot policy","text":"

The current1 policy is to take snapshots on an hourly, daily and weekly basis. Older snapshots automatically expire after their retention period. The snapshot policy applies to both $HOME and $GROUP_HOME storage spaces.

| Snapshot frequency | Retention period | Number of snapshots |
| ------------------ | ---------------- | ------------------- |
| hourly | 2 days | 48 |
| daily | 1 week | 7 |
| weekly | 1 month | 4 |

The shortest interval between snapshots is an hour. That means that if you create a file and then delete it within the hour, it won't appear in snapshots, and you won't be able to restore it.

If a file exists for more than an hour, and is then deleted, it will be present in the hourly snapshots for the next 48 hours, and you'll be able to retrieve it during that period. Similarly, if a file exists for more than a day, it could be restored for up to 7 days.
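If you're not sure when a file was last captured, you can list all its snapshotted copies at once, and pick the version you need. For instance, for a file that used to live at $HOME/foo/bar:

$ ls -l $HOME/.snapshot/*/foo/bar\n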

Snapshots don't count towards your quota.

Snapshots, as well as the entire filesystem, are replicated to an off-site system, to ensure that data can be retrieved even in case of a catastrophic failure of the whole system or a datacenter-level disaster.

"},{"location":"docs/storage/data-protection/#backups","title":"Backups","text":"

Although Stanford Research Computing doesn't offer any backup service per se, we do provide all the tools required to transfer data in and out of Sherlock.

Suggested options to backup your data include:

  • Oak, Stanford Research Computing's long-term research data storage service (Recommended)
  • University IT Storage options and backup services
  • Cloud storage providers (see the Data transfer page for information about the tools we provide to transfer files to/from the cloud)
  1. The snapshot policy is subject to change and may be adjusted as the storage system usage conditions evolve.\u00a0\u21a9

"},{"location":"docs/storage/data-sharing/","title":"Data sharing","text":"

The following sections present and detail options to share data across users and groups on Sherlock.

"},{"location":"docs/storage/data-sharing/#sharing-data-locally-on-sherlock","title":"Sharing data locally on Sherlock","text":""},{"location":"docs/storage/data-sharing/#traditional-unix-permissions","title":"Traditional Unix permissions","text":"

Standard Unix file permissions are supported on Sherlock and provide read, write and execute permissions for the three distinct access classes.

The access classes are defined as follows:

  • Files and directories are owned by a user. The owner determines the file's user class. Distinct permissions apply to the owner.
  • Files and directories are assigned a group, which defines the file's group class. Distinct permissions apply to members of the file's group. The owner may be a member of the file's group.
  • Users who are not the owner, nor a member of the group, comprise a file's others class. Distinct permissions apply to others.

The following permissions apply to each class:

  • The read permission grants the ability to read a file. When set for a directory, this permission grants the ability to read the names of files in the directory, but not to find out any further information about them, such as contents, file type, size, ownership or permissions.
  • The write permission grants the ability to modify a file. When set for a directory, this permission grants the ability to modify entries in the directory. This includes creating files, deleting files, and renaming files.
  • The execute permission grants the ability to execute a file. This permission must be set for executable programs, including shell scripts, to allow the operating system to run them. When set for a directory, this permission grants the ability to access file contents and meta-information if the file's name is known, but not to list files inside the directory, unless the read permission is also set.

Shared directories traversal

If you need to give access to one of your files to another user, they will at least need execute permission on each directory within the path to that file.
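For instance, here is a minimal sketch for sharing a single file with users outside your group using traditional permissions only (the shared directory and file names are just examples):

### give others traversal permission on each directory in the path\n$ chmod o+x $HOME $HOME/shared\n### and read permission on the file itself\n$ chmod o+r $HOME/shared/results.txt\n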

The effective permissions are determined based on the first class the user falls within in the order of user, group then others. For example, the user who is the owner of the file will have the permissions given to the user class regardless of the permissions assigned to the group class or others class.

While traditional Unix permissions are sufficient in most cases to share files with all the users within the same group, they are not enough to share files with a specific subset of users, or with users from other groups. Access Control Lists (ACLs) can be used for that purpose.

There are two types of ACLs supported on Sherlock, depending on the underlying filesystem:

| Type | Filesystems |
| ---- | ----------- |
| NFSv4 ACLs | $HOME and $GROUP_HOME |
| POSIX ACLs | $SCRATCH, $GROUP_SCRATCH, $L_SCRATCH and $OAK |
"},{"location":"docs/storage/data-sharing/#posix-acls","title":"POSIX ACLs","text":"

POSIX ACLs allow you to grant or deny access to files and directories for different users (or groups), independently of the file's owner or group.

Two types of POSIX ACLs can be defined:

  • Access ACLs: grant permission for a specific file or directory.
  • Default ACLs: allow setting a default set of ACLs that will be applied to any file or directory that doesn't already have ACLs defined. They can only be set on directories.

ACLs are set with the setfacl command, and displayed with getfacl. For more details and examples, please refer to this documentation.

In the example below, we allow two users to access a restricted directory located at $GROUP_SCRATCH/restricted-dir/:

$ cd $GROUP_SCRATCH\n\n### Create new directory\n$ mkdir restricted-dir\n\n### Remove 'group' and 'other' access\n$ chmod g-rwx,o-rwx restricted-dir\n\n### Give user bob read and traversal permissions to the directory\n$ setfacl -m u:bob:rX restricted-dir\n\n### Use default ACLs (-d) to give user bob read access to all new\n### files and sub-directories that will be created in \"restricted-dir\"\n$ setfacl -d -m u:bob:rX restricted-dir\n\n### Give user alice read, write and traversal permissions for the directory\n$ setfacl -m u:alice:rwX restricted-dir\n\n### Use default ACLs (-d) to give user alice read and write access to all\n### new files and sub-directories\n$ setfacl -d -m u:alice:rwX restricted-dir\n\n### Show ACLs\n$ getfacl restricted-dir\n# file: restricted-dir/\n# owner: joe\n# group: grp\n# flags: -s-\nuser::rwx\nuser:alice:rwx\nuser:bob:r-x\ngroup::---\nmask::rwx\nother::---\ndefault:user::rwx\ndefault:user:alice:rwx\ndefault:user:bob:r-x\ndefault:group::---\ndefault:mask::rwx\ndefault:other::---\n
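Conversely, access granted through ACLs can be revoked with setfacl, either entry by entry or all at once:

### Remove bob's entries from the access and default ACLs\n$ setfacl -x u:bob restricted-dir\n$ setfacl -d -x u:bob restricted-dir\n\n### Or remove all extended ACL entries at once\n$ setfacl -b restricted-dir\n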

Default permissions on $GROUP_SCRATCH

By default, the Unix permissions on the root directory $GROUP_SCRATCH don't allow read or traversal access for others (i.e. any user not part of your PI group). If you need to share files with users outside of your own group, please contact us so we can set the appropriate permissions on your folder.

For $SCRATCH, you're the owner of the directory and so you can change the permissions yourself.

"},{"location":"docs/storage/data-sharing/#nfsv4-acls","title":"NFSv4 ACLs","text":"

$HOME and $GROUP_HOME also allow setting ACLs, albeit with different syntax and semantics than POSIX ACLs. The principle is very similar, though.

An ACL in NFSv4 is a list of rules setting permissions on files or directories. A permission rule, or Access Control Entry (ACE), is of the form type:flags:principal:permissions.

Commonly used entries for these fields are:

  • type: A (allow) or D (deny)
  • flags: g (group), d (directory-inherit), f (file-inherit), n (no-propagate-inherit), or i (inherit-only)
  • principal: a named user (user@sherlock), a group, or one of three special principals: OWNER@, GROUP@, and EVERYONE@.
  • permissions: there are 14 permission characters, as well as the shortcuts R, W, and X. Here is a list of possible permissions that can be included in the permissions field (note that they are case-sensitive):
  • r read-data (files) / list-directory (directories)
  • w write-data (files) / create-file (directories)
  • x execute (files) / change-directory (directories)
  • a append-data (files) / create-subdirectory (directories)
  • t read-attributes: read the attributes of the file/directory.
  • T write-attributes: write the attributes of the file/directory.
  • n read-named-attributes: read the named attributes of the file/directory.
  • N write-named-attributes: write the named attributes of the file/directory.
  • c read-ACL: read the file/directory NFSv4 ACL.
  • C write-ACL: write the file/directory NFSv4 ACL.
  • o write-owner: change ownership of the file/directory.
  • y synchronize: allow clients to use synchronous I/O with the server.
  • d delete: delete the file/directory. Some servers will allow a delete to occur if either this permission is set in the file/directory or if the delete-child permission is set in its parent directory.
  • D delete-child: remove a file or subdirectory from within the given directory (directories only)
  • A comprehensive listing of allowable field strings is given in the manual page nfs4_acl(5)

    To see what permissions are set on a particular file, use the nfs4_getfacl command. For example, a newly created file1 may have default permissions listed by ls -l as -rw-r--r--. Listing the permissions with nfs4_getfacl would display the following:

    $ nfs4_getfacl file1\nA::OWNER@:rwatTnNcCoy\nA:g:GROUP@:rtncy\nA::EVERYONE@:rtncy\n

    To set permissions on a file, use the nfs4_setfacl command. For convenience, NFSv4 provides the shortcuts R, W and X for setting read, write, and execute permissions. For example, to add write permissions for the current group on file1, use nfs4_setfacl with the -a switch:

    $ nfs4_setfacl -a A::GROUP@:W file1\n

    This command switched the GROUP@ permission field from rtncy to rwatTnNcCoy. However, be aware that the NFSv4 file permission shortcuts have different meanings than the traditional Unix r, w, and x. For example, issuing chmod g+w file1 will set GROUP@ to rwatncy.

    Although the shortcut permissions can be handy, often rules need to be more customized. Use nfs4_setfacl -e file1 to open the ACL for file1 in a text editor.

    Access Control Entries allow more fine-grained control over file and directory permissions than does the chmod command. For example, if user joe wants to give read, write and traverse permissions to jack for their directory private, they would issue:

    $ nfs4_setfacl -R -a A::jack@sherlock:RWX private/\n

    The -R switch recursively applies the rule to the files and directories within private/ as well.

    To allow jack to create files and subdirectories within private/ with the permissions as granted above, inheritance rules need to be applied.

    $ nfs4_setfacl -R -a A:fd:jack@sherlock:RWX private/\n

    By default, each permission is in the Deny state, and an ACE is required to explicitly allow a permission. However, be aware that a server may silently override a user's ACE, usually to a less permissive setting.

    For complete documentation and examples on using NFSv4 ACLs, please see the manual page at nfs4_acl(5).

    Default permissions on $GROUP_HOME

    By default, the Unix permissions on the root directory $GROUP_HOME don't allow read or traversal access for others (i.e. any user not part of your PI group). If you need to share files with users outside of your own group, please contact us so we can set the appropriate permissions on your folder.

    For $HOME, you're the owner of the directory and so you can change the permissions yourself.

    "},{"location":"docs/storage/data-sharing/#sharing-data-outside-of-sherlock","title":"Sharing data outside of Sherlock","text":"

    If you'd like to share data stored on Sherlock with external collaborators, there are two possibilities:

    1. sponsor a SUNet ID1 for these collaborators, and contact us to create an account for them on Sherlock. This will grant them access to your resources on Sherlock (compute as well as storage) and give them access to your group's shared files, like any other user in your group.

    2. if you don't want to grant full access to your Sherlock resources to your external collaborators, you can use the Globus data sharing feature. This won't require your collaborators to get Stanford accounts, and will allow easy sharing of the datasets of your choice.

      Globus Sharing is only available through the Oak endpoint

      Globus Sharing is only available on $OAK, using the Oak Globus Endpoint 2 (srcc#oak).

      For complete details about sharing data with Globus, please see the Globus documentation at https://docs.globus.org/how-to/share-files/

    1. a base-level SUNet ID (free) is sufficient to get an account on Sherlock. For more details about SUNet ID levels and associated services, please see the Stanford UIT SUNet IDs page.\u00a0\u21a9

    2. SUNet ID required\u00a0\u21a9

    "},{"location":"docs/storage/data-transfer/","title":"Data transfer","text":"","tags":["connection"]},{"location":"docs/storage/data-transfer/#transfer-protocols","title":"Transfer protocols","text":"

    A number of methods allow transferring data in/out of Sherlock. For most cases, we recommend using SSH-based file transfer commands, such as scp, sftp, or rsync. They will provide the best performance for data transfers from and to campus.

    For large transfers, using DTNs is recommended

    Most casual data transfers can be done through the login nodes, by pointing your transfer tool at login.sherlock.stanford.edu. But because of resource limits on the login nodes, larger transfers may not work as expected.

    For transferring large amounts of data, Sherlock features a specific Data Transfer Node, with dedicated bandwidth, as well as a managed Globus endpoint, that can be used for scheduled, unattended data transfers.

    We also provide tools on Sherlock to transfer data to various Cloud providers, such as AWS, Google Drive, Dropbox, Box, etc.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#prerequisites","title":"Prerequisites","text":"

    Most of the commands detailed below require a terminal and an SSH client1 on your local machine to launch commands.

    You'll need to start a terminal and type the given example commands at the prompt, omitting the initial $ character (it just indicates a command prompt, and should not be typed in).

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#host-keys","title":"Host keys","text":"

    Upon your very first connection to Sherlock, you will be greeted by a warning such as:

    The authenticity of host 'login.sherlock.stanford.edu' can't be established.\nECDSA key fingerprint is SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg.\nAre you sure you want to continue connecting (yes/no)?\n

    The same warning will be displayed if you try to connect to one of the Data Transfer Nodes (DTNs):

    The authenticity of host 'dtn.sherlock.stanford.edu' can't be established.\nECDSA key fingerprint is SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg.\nAre you sure you want to continue connecting (yes/no)?\n

    This warning is normal: your SSH client warns you that it is the first time it sees that new computer. To make sure you are actually connecting to the right machine, you should compare the ECDSA key fingerprint shown in the message with one of the fingerprints below:

    | Key type | Key fingerprint |
    | -------- | --------------- |
    | RSA | SHA256:T1q1Tbq8k5XBD5PIxvlCfTxNMi1ORWwKNRPeZPXUfJA<br>legacy format: f5:8f:01:46:d1:f9:66:5d:33:58:b4:82:d8:4a:34:41 |
    | ECDSA | SHA256:eB0bODKdaCWtPgv0pYozsdC5ckfcBFVOxeMwrNKdkmg<br>legacy format: 70:4c:76:ea:ae:b2:0f:81:4b:9c:c6:5a:52:4c:7f:64 |

    If they match, you can proceed and type \u2018yes\u2019. Your SSH program will then store that key and will verify it for every subsequent SSH connection, to make sure that the server you're connecting to is indeed Sherlock.
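    Once you've accepted the key, you can re-display the stored fingerprint at any time on your local machine, and compare it with the table above:

    $ ssh-keygen -l -F login.sherlock.stanford.edu\n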

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#host-keys-warning","title":"Host keys warning","text":"

    If you've connected to Sherlock 1.0 before, there's a good chance the Sherlock 1.0 keys were stored by your local SSH client. In that case, when connecting to Sherlock 2.0 using the sherlock.stanford.edu alias, you will be presented with the following message:

    @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@ WARNING: POSSIBLE DNS SPOOFING DETECTED! @\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nThe RSA host key for sherlock.stanford.edu has changed, and the key for\nthe corresponding IP address 171.66.97.101 is unknown. This could\neither mean that DNS SPOOFING is happening or the IP address for the\nhost and its host key have changed at the same time.\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nIT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\nSomeone could be eavesdropping on you right now (man-in-the-middle\nattack)!  It is also possible that a host key has just been changed.\nThe fingerprint for the RSA key sent by the remote host is\nSHA256:T1q1Tbq8k5XBD5PIxvlCfTxNMi1ORWwKNRPeZPXUfJA.\nPlease contact your system administrator.\n

    You can just check that the SHA256 key listed in that warning message correctly matches the one listed in the table above, and if that's the case, you can safely remove the sherlock.stanford.edu entry from your ~/.ssh/known_hosts file with the following command on your local machine:

    $ ssh-keygen -R sherlock.stanford.edu\n

    and then connect again. You'll see the first-connection prompt mentioned above, and your SSH client will store the new keys for future connections.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#ssh-based-protocols","title":"SSH-based protocols","text":"

    User name

    In all the examples below, you'll need to replace <sunetid> by your actual SUNet ID. If you happen to use the same login name on your local machine, you can omit it.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#scp-secure-copy","title":"SCP (Secure Copy)","text":"

    The easiest command to use to transfer files to/from Sherlock is scp. It works like the cp command, except it can work over the network to copy files from one computer to another, using the secure SSH protocol.

    The general syntax to copy a file to a remote server is:

    $ scp <source_file_path> <username>@<remote_host>:<destination_path>\n

    For instance, the following command will copy the file named foo from your local machine to your home directory on Sherlock:

    $ scp foo <sunetid>@login.sherlock.stanford.edu:\n
    Note the : character, which separates the hostname from the destination path. Here, the destination path is empty, which will instruct scp to copy the file into your home directory.

    You can copy foo under a different name, or to another directory, with the following commands:

    $ scp foo <sunetid>@login.sherlock.stanford.edu:bar\n$ scp foo <sunetid>@login.sherlock.stanford.edu:~/subdir/baz\n

    To copy back files from Sherlock to your local machine, you just need to reverse the order of the arguments:

    $ scp <sunetid>@login.sherlock.stanford.edu:foo local_foo\n

    And finally, scp also supports recursive copying of directories, with the -r option:

    $ scp -r dir/ <sunetid>@login.sherlock.stanford.edu:dir/\n
    This will copy the dir/ directory and all of its contents into your home directory on Sherlock.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#sftp-secure-file-transfer-protocol","title":"SFTP (Secure File Transfer Protocol)","text":"

    SFTP clients are interactive file transfer programs, similar to FTP, which perform all operations over an encrypted transport.

    A variety of graphical SFTP clients are available for different OSes:

    • WinSCP
    • SecureFX
    • Fetch2
    • CyberDuck

    When setting up your connection to Sherlock in the above programs, use the following information:

    Hostname: login.sherlock.stanford.edu\nPort:     22\nUsername: SUNet ID\nPassword: SUNet ID password\n

    OpenSSH also provides a command-line SFTP client, simply named sftp.

    To log in to Sherlock:

    $ sftp <sunetid>@login.sherlock.stanford.edu\nConnected to login.sherlock.stanford.edu.\nsftp>\n
    For more information about using the command-line SFTP client, you can refer to this tutorial.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#rsync","title":"rsync","text":"

    If you have complex hierarchies of files to transfer, or if you need to synchronize a set of files and directories between your local machine and Sherlock, rsync will be the best tool for the job. It efficiently transfers and synchronizes files across systems, by checking their timestamps and sizes: it won't re-transfer files that have not changed since the last transfer, and will thus complete faster.

    For instance, to transfer the whole ~/data/ folder tree from your local machine to your home directory on Sherlock, you can use the following command:

    $ rsync -a ~/data/ <sunetid>@login.sherlock.stanford.edu:data/\n
    Note the slash (/) at the end of the directory names, which is important to instruct rsync to synchronize the whole directories.

    To get more information about the transfer rate and follow its progress, you can use additional options:

    $ rsync -avP ~/data/ <sunetid>@login.sherlock.stanford.edu:data/\nsending incremental file list\n./\nfile1\n      1,755,049 100%    2.01MB/s    0:00:00 (xfr#2, to-chk=226/240)\nfile2\n      2,543,699 100%    2.48MB/s    0:00:00 (xfr#3, to-chk=225/240)\nfile3\n     34,930,688  19%   72.62MB/s    0:00:08\n\n[...]\n
    For more information about using rsync, you can refer to this tutorial.
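    Since rsync can overwrite files at the destination, it can also be useful to preview a synchronization before running it for real, with the --dry-run (-n) option: it lists the files that would be transferred, without actually copying anything:

    $ rsync -avPn ~/data/ <sunetid>@login.sherlock.stanford.edu:data/\n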

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#sshfs","title":"SSHFS","text":"

    Sometimes, moving files in and out of the cluster, and maintaining two copies of each of the files you work on, both on your local machine and on Sherlock, may be painful. Fortunately, Sherlock offers the ability to mount any of its filesystems to your local machine, using a secure and encrypted connection.

    With SSHFS, a FUSE-based filesystem implementation used to mount remote SSH-accessible filesystems, you can access your files on Sherlock as if they were locally stored on your own computer.

    This comes in particularly handy when you need to access those files from an application that is not available on Sherlock, but that you already use or can install on your local machine: a data processing program that you have licensed for your own computer but can't use on Sherlock, a specific text editor that only runs on macOS, or any data-intensive 3D rendering software that wouldn't work comfortably enough over a forwarded X11 connection.

    SSHFS is available for Linux, macOS, and Windows.

    SSHFS on macOS

    SSHFS on macOS is known to try to automatically reconnect filesystem mounts after resuming from sleep or suspend, even without any valid credentials. As a result, it will generate a lot of failed connection attempts and likely get your IP address blacklisted on login nodes.

    Make sure to unmount your SSHFS drives before putting your macOS system to sleep to avoid this situation.

    The following option could also be useful to avoid some permission issues: -o defer_permissions

    For instance, on a Linux machine with SSHFS installed, you could mount your Sherlock home directory via a Sherlock DTN with the following commands:

    $ mkdir ~/sherlock_home\n$ sshfs <sunetid>@dtn.sherlock.stanford.edu:./ ~/sherlock_home\n

    Using DTNs for data transfer

    Using the Sherlock DTNs instead of login nodes will ensure optimal performance for data transfers. Login nodes only have limited resources, which could restrict data transfer rates or cause disconnections during long transfers.

    And to unmount it:

    $ umount ~/sherlock_home\n

    On Windows, once SSHFS is installed, you can mount the $SCRATCH filesystem as a network drive through the Windows File Explorer. To do this, go to \"This PC\", right-click in the \"Network Locations\" section of the window and select \"Add a Network Drive\". Then, in the \"Add Network Location Wizard\", use the following network address:

    \\\\sshfs\\<sunetid>@dtn.sherlock.stanford.edu\n

    This will mount the $SCRATCH partition as a network drive on your PC.

    For more information about using SSHFS on your local machine, you can refer to this tutorial for more details and examples.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#globus","title":"Globus","text":"

    Globus improves SSH-based file transfer protocols by providing the following features:

    • automates large data transfers,
    • handles transient errors, and can resume failed transfers,
    • simplifies the implementation of high-performance transfers between computing centers.

    Globus is a Software as a Service (SaaS) system that provides end-users with a browser interface to initiate data transfers between endpoints. Globus allows users to \"drag and drop\" files from one endpoint to another. Endpoints are terminals for data; they can be laptops or supercomputers, and anything in between. The Globus web service negotiates, monitors, and optimizes transfers through firewalls and across network address translation (NAT). Under certain circumstances, with high-performance hardware, transfer rates exceeding 1 GB/s are possible. For more information about Globus, please see the Globus documentation.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#authentication","title":"Authentication","text":"

    To use Globus, you will first need to authenticate at Globus.org. You can either sign up for a Globus account, or use your SUNet ID account for authentication to Globus (which will be required to authenticate to the Sherlock endpoint).

    To use your SUNet ID, choose \"Stanford University\" from the drop down menu at the Login page and follow the instructions from there.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#transfer","title":"Transfer","text":"

    Endpoint name

    The Globus endpoint name for Sherlock is SRCC Sherlock.

    Oak endpoint

    The Sherlock endpoint only provides access to Sherlock-specific file systems ($HOME, $GROUP_HOME, $SCRATCH and $GROUP_SCRATCH). Oak features its own Globus endpoint: SRCC Oak.

    You can use Globus to transfer data between your local workstation (e.g., your laptop or desktop) and Sherlock. In this workflow, you configure your local workstation as a Globus endpoint by installing the Globus Connect software.

    1. Log in to Globus.org
    2. Use the Manage Endpoints interface to \"add Globus Connect Personal\" as an endpoint (you'll need to install Globus Connect Personal on your local machine)
    3. Transfer Files, using your new workstation endpoint for one side of the transfer, and the Sherlock endpoint (SRCC Sherlock) on the other side.

    You can also transfer data between two remote endpoints, by choosing another endpoint you have access to instead of your local machine.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#cli-and-api","title":"CLI and API","text":"

    Globus also provides a command-line interface (CLI) and application programming interface (API) as alternatives to its web interface.

    For more information about the API, please see the Globus API documentation for more details.

    For more information about the CLI, please see the Globus CLI documentation and Globus CLI quick start. Note that the Globus CLI is available through the module system on Sherlock:

    $ module load system py-globus-cli\n$ globus login\n# follow instructions to get set up\n

    Once you've authorized the application, you can use the globus CLI to copy files in between endpoints and collections that you have access to. Endpoints and collections are identified by their unique UUID4 identifiers, which are viewable through the Globus web app. The CLI will step you through any additional authorizations required for you to access the endpoints or collections.

    For example, to asynchronously copy files between Sherlock and Oak (assuming you have already been allocated Oak storage):

    $ GLOBUS_SHERLOCK_UUID=\"6881ae2e-db26-11e5-9772-22000b9da45e\"\n$ GLOBUS_OAK_UUID=\"8b3a8b64-d4ab-4551-b37e-ca0092f769a7\"\n$ globus transfer --recursive \\\n    \"$GLOBUS_SHERLOCK_UUID:$SCRATCH/my-interesting-project\" \\\n    \"$GLOBUS_OAK_UUID:$OAK/my-interesting-project-copy\"\n
    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#data-transfer-nodes-dtns","title":"Data Transfer Nodes (DTNs)","text":"

    No shell

    The DTNs don't provide any interactive shell, so connecting via SSH directly won't work. They will only accept scp, sftp, rsync or bbcp connections.

    A pool of dedicated Data Transfer Nodes is available on Sherlock, to provide exclusive resources for large-scale data transfers.

    The main benefit of using them is that transfer tasks can't be disrupted by other users' interactive tasks, or by filesystem access and I/O-related workloads on the login nodes.

    By using the Sherlock DTNs, you'll make sure that your data flows go through machines whose sole purpose is to move data around.

    They support:

    • SSH-based protocols (such as the ones described above)
    • bbcp
    • Globus

    To transfer files via the DTNs, simply use dtn.sherlock.stanford.edu as a remote server host name. For instance:

    $ scp foo <sunetid>@dtn.sherlock.stanford.edu:~/foo\n

    $HOME on DTNs

    One important difference to keep in mind when transferring files through the Sherlock DTNs is that the default destination path for files, unless specified, is the user $SCRATCH directory, not $HOME.

    That means that the following command:

    $ scp foo <sunetid>@dtn.sherlock.stanford.edu:\n
    will create the foo file in $SCRATCH/foo, and not in $HOME/foo.

    You can transfer files to your $HOME directory via the DTNs by specifying the full path as the destination:

    $ scp foo <sunetid>@dtn.sherlock.stanford.edu:$HOME/foo\n
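    The same applies to the other SSH-based tools described above. For instance, an rsync transfer through the DTNs would look like this (here too, the relative destination path will be created under $SCRATCH):

    $ rsync -avP ~/data/ <sunetid>@dtn.sherlock.stanford.edu:data/\n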

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#cloud-storage","title":"Cloud storage","text":"

    If you need to backup some of your Sherlock files to cloud-based storage services, we also provide a set of utilities that can help.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#google-drive","title":"Google Drive","text":"

    Google Drive storage for Stanford users

    For more information about using Google Drive at Stanford, please see the University IT Google Drive page.

    We provide the rclone tool on Sherlock to interact with Google Drive. You'll just need to load the rclone module to be able to use it to move your files from/to Google Drive:

    $ module load system rclone\n$ rclone --help\n
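    As a sketch, assuming you've first created an rclone remote named gdrive with rclone config (the remote and folder names below are just examples), copying a directory to Google Drive and listing its contents could look like:

    ### copy a results directory to a pre-configured remote named gdrive\n$ rclone copy $SCRATCH/results gdrive:sherlock-backup/results\n### list what's been uploaded\n$ rclone ls gdrive:sherlock-backup\n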

    This tutorial provides an example of transferring files between Google Drive and Oak storage.

    The Globus CLI (see above) can also be used to copy files from Sherlock to Stanford's Google Drive.

    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#aws","title":"AWS","text":"

    You can also access AWS storage from the Sherlock command line with the AWS Command Line Interface:

    $ module load system aws-cli\n$ aws help\n
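    For instance, assuming your AWS credentials have been configured with aws configure, and that my-bucket is an S3 bucket you have access to (both are examples, not pre-existing settings), uploading a file could look like this:

    ### one-time credentials setup\n$ aws configure\n### copy an archive from $SCRATCH to S3\n$ aws s3 cp $SCRATCH/results.tar.gz s3://my-bucket/results.tar.gz\n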
    ","tags":["connection"]},{"location":"docs/storage/data-transfer/#other-services","title":"Other services","text":"

    If you need to access other cloud storage services, you can use rclone: it can be used to sync files and directories to and from Google Drive, Amazon S3, Box, Dropbox, Google Cloud Storage, Amazon Drive, Microsoft OneDrive and many more.

    $ ml load system rclone\n$ rclone -h\n

    For more details about how to use rclone, please see the official documentation.

    1. For more details, see the SSH clients page.\u00a0\u21a9

    2. Fetch is a commercial program, and is available as part of the Essential Stanford Software bundle.\u00a0\u21a9

    ","tags":["connection"]},{"location":"docs/storage/filesystems/","title":"Filesystems","text":"

    The following sections describe the characteristics and best uses of each of Sherlock's filesystems.

    "},{"location":"docs/storage/filesystems/#home","title":"$HOME","text":"

    Summary

    $HOME is your home directory. It's the best place to keep your code and important data as it provides snapshots and off-site replication. It is not meant to host data that will be actively read and written to by compute jobs.

    | Characteristics | |
    | --------------- | --- |
    | Type | high speed, distributed NFS file system |
    | Quota | 15 GB for the whole $HOME directory |
    | Snapshots | yes (cf. Snapshots for more info) |
    | Backups | off-site replication |
    | Purge policy | not purged |
    | Scope | all login and compute nodes |
    "},{"location":"docs/storage/filesystems/#recommended-usage","title":"Recommended usage","text":"

    $HOME is best suited for personal configuration files, scripts, small reference files or datasets, source code and individual software installations.

    When you log in, the system automatically sets the current working directory to $HOME: it's the location you'll end up when connecting to Sherlock. You can store your source code and build your executables there.

    We strongly recommend using $HOME to reference your home directory in scripts, rather than its explicit path.

    "},{"location":"docs/storage/filesystems/#checking-quota-usage","title":"Checking quota usage","text":"

    The sh_quota tool can be used to display quota usage on $HOME

    $ sh_quota -f HOME\n

    See the Checking Quotas section for more details.

    "},{"location":"docs/storage/filesystems/#group_home","title":"$GROUP_HOME","text":"

    Summary

    $GROUP_HOME is your group home directory. It's the best place to keep your group's shared code, software installations and important data as it provides snapshots and off-site replication. It is not meant to host data that will be actively read and written to by compute jobs.

    $HOME and $GROUP_HOME are based on the same physical file system.

    | Characteristics | |
    | --------------- | --- |
    | Type | high speed, distributed NFS file system |
    | Quota | 1 TB for the whole $GROUP_HOME directory |
    | Snapshots | yes (cf. Snapshots for more info) |
    | Backups | off-site replication |
    | Purge policy | not purged |
    | Scope | all login and compute nodes |
    "},{"location":"docs/storage/filesystems/#recommended-usage_1","title":"Recommended usage","text":"

    $GROUP_HOME is best suited for group shared source code, common software installations, shared data sets and scripts.

    We strongly recommend using $GROUP_HOME to reference your group home directory in scripts, rather than its explicit path.

    "},{"location":"docs/storage/filesystems/#checking-quota-usage_1","title":"Checking quota usage","text":"

    The sh_quota tool can be used to display quota usage on $GROUP_HOME

    $ sh_quota -f GROUP_HOME\n

    See the Checking Quotas section for more details.

    "},{"location":"docs/storage/filesystems/#scratch","title":"$SCRATCH","text":"

    Summary

    $SCRATCH is your personal scratch space. It's the best place to store temporary files, such as raw job output, intermediate files, unprocessed results, and so on.

    Purge policy

    Files are automatically purged from $SCRATCH after an inactivity period:

    • files that are not modified after 90 days are automatically deleted,
    • contents need to change for a file to be considered modified. The touch command does not modify file contents and thus does not extend a file's lifetime on the filesystem.

    $SCRATCH is not meant to store permanent data, and should only be used for data associated with currently running jobs. It's not a target for backups, archived data, etc. See the Expiration Policy section for details.

    | Characteristics | |
    | --------------- | --- |
    | Type | parallel, high-performance Lustre file system |
    | Quota | 100 TB / 20,000,000 inodes2 |
    | Snapshots | NO |
    | Backups | NO |
    | Purge policy | data not modified in the last 90 days are automatically purged |
    | Scope | all login and compute nodes |
    "},{"location":"docs/storage/filesystems/#recommended-usage_2","title":"Recommended usage","text":"

    $SCRATCH is best suited for large files, such as raw job output, intermediate job files, unprocessed simulation results, and so on. This is the recommended location to run jobs from, and to store files that will be read or written to during job execution.

    Old files are automatically purged on $SCRATCH so users should avoid storing long-term data there.

    Each compute node has a low-latency, high-bandwidth Infiniband link to $SCRATCH. The aggregate bandwidth of the filesystem is about 75 GB/s, so any job with high data performance requirements will benefit from using $SCRATCH for I/O.

    We strongly recommend using $SCRATCH to reference your scratch directory in scripts, rather than its explicit path.

    "},{"location":"docs/storage/filesystems/#checking-quota-usage_2","title":"Checking quota usage","text":"

    The sh_quota tool can be used to display quota usage on $SCRATCH

    $ sh_quota -f SCRATCH\n

    See the Checking Quotas section for more details.

    "},{"location":"docs/storage/filesystems/#expiration-policy","title":"Expiration policy","text":"

    Inactive files are automatically purged

    Files that are not modified in the last 90 days will be automatically deleted from the filesystem.

    To manage available space and maintain optimal performance for all jobs, all files on $SCRATCH are subject to automatic purges: after a period of inactivity, files that are no longer used are automatically deleted from the filesystem.

    File activity is defined based on the last time a file's contents (the actual data in the file) have been modified: files whose contents have not been modified in the previous 90 days will be automatically deleted.

    Each time a file's contents are modified, the expiration countdown is reset, and the file gets another 90 days of lifetime.

    Metadata changes don't qualify as an update

    Modifying a file's contents is the only way to reset the expiration countdown and extend the file's lifetime on the filesystem.

    Metadata modifications, such as reading the file, renaming it, moving it to a different directory, changing its permissions or its ownership, or \"touching\" it to update its last modification or access times, won't have any effect on the purge countdown.

    Purges are based on an internal filesystem property that reflects the last date a file's data has been modified, and which is unfortunately not readily accessible by users.

    Please note that tools like ls will only display the date of the last metadata1 modification for a file, which is not necessarily relevant to determine a file's eligibility for deletion. For instance, using the touch command on a file to update its last modification date will only update the metadata, not the data, and as such, will not reset the purge countdown timer.

    Filesystem purges are a continuous process: they don't run at particular times, but are carried out in a permanent background fashion. Files are not necessarily deleted right away when they become eligible for deletion. For instance, if you create a file on February 1st and don't ever modify it afterwards, it will automatically become eligible for deletion on May 1st, and can be deleted anytime after that date.

    Empty directory trees that stay devoid of any file for more than 90 days will be automatically cleaned up as well.

    "},{"location":"docs/storage/filesystems/#group_scratch","title":"$GROUP_SCRATCH","text":"

    $SCRATCH and $GROUP_SCRATCH are based on the same physical file system.

    Summary

    $GROUP_SCRATCH is your group shared scratch space. It's the best place to store temporary files, such as raw job output, intermediate files, or unprocessed results that need to be shared among users within a group.

    $GROUP_SCRATCH is NOT a backup target

    $GROUP_SCRATCH is not meant to store permanent data, and should only be used for data associated with currently running jobs. It's not a target for backups, archived data, etc.

    | Characteristics | |
    | --------------- | --- |
    | Type | parallel, high-performance Lustre file system |
    | Quota | 100 TB / 20,000,000 inodes2 |
    | Snapshots | NO |
    | Backups | NO |
    | Purge policy | data not modified in the last 90 days are automatically purged |
    | Scope | all login and compute nodes |
    "},{"location":"docs/storage/filesystems/#recommended-usage_3","title":"Recommended usage","text":"

    $GROUP_SCRATCH is best suited for large files, such as raw job output, intermediate job files, unprocessed simulation results, and so on. This is the recommended location to run jobs from, and to store files that will be read or written to during job execution.

    Old files are automatically purged on $GROUP_SCRATCH so users should avoid storing long-term data there.

    We strongly recommend using $GROUP_SCRATCH to reference your group scratch directory in scripts, rather than its explicit path.

    "},{"location":"docs/storage/filesystems/#checking-quota-usage_3","title":"Checking quota usage","text":"

    The sh_quota tool can be used to display quota usage on $GROUP_SCRATCH

    $ sh_quota -f GROUP_SCRATCH\n

    See the Checking Quotas section for more details.

    "},{"location":"docs/storage/filesystems/#expiration-policy_1","title":"Expiration policy","text":"

    As $SCRATCH and $GROUP_SCRATCH are on the same filesystem, the same expiration policy applies to both. Please see the $SCRATCH section above for more details.

    "},{"location":"docs/storage/filesystems/#l_scratch","title":"$L_SCRATCH","text":"

    Summary

    $L_SCRATCH is local to each compute node, and could be used to store temporary files for jobs with high IOPS requirements. Files stored in $L_SCRATCH are purged at the end of the job.

    | Characteristics | |
    | --------------- | --- |
    | Type | local filesystem, specific to each node, based on SSD |
    | Quota | n/a (usable space limited by the size of the physical storage devices, typically around 150 GB) |
    | Snapshots | NO |
    | Backups | NO |
    | Purge policy | data immediately purged at the end of the job |
    | Scope | locally on each node, not shared across nodes |
    "},{"location":"docs/storage/filesystems/#recommended-usage_4","title":"Recommended usage","text":"

    $L_SCRATCH is best suited for small temporary files and applications which require low latency and high IOPS levels, typically intermediate job files, checkpoints, dumps of temporary states, etc.

    Files stored in $L_SCRATCH are local to each node and can't be accessed from other nodes, nor from login nodes.

    Please note that an additional, job-specific environment variable, $L_SCRATCH_JOB, will be set to a subdirectory of $L_SCRATCH for each job. So, if you have two jobs running on the same compute node, $L_SCRATCH will be the same and accessible from both jobs, while $L_SCRATCH_JOB will be different for each job.

    For instance, if you have jobs 98423 and 98672 running on the same node, the variables will be set as follows:

    | Job id | $L_SCRATCH | $L_SCRATCH_JOB |
    | ------ | ---------- | -------------- |
    | 98423 | /lscratch/kilian | /lscratch/kilian/98423 |
    | 98672 | /lscratch/kilian | /lscratch/kilian/98672 |

    We strongly recommend using $L_SCRATCH to reference your local scratch directory in scripts, rather than its full path.

    "},{"location":"docs/storage/filesystems/#expiration-policy_2","title":"Expiration policy","text":"

    All files stored in $L_SCRATCH_JOB are automatically purged at the end of the job, whether the job was successful or not. If you need to conserve files that were generated in $L_SCRATCH_JOB after the job ends, don't forget to add a command at the end of your batch script to copy them to one of the more persistent storage locations, such as $HOME or $SCRATCH.

    Data stored in $L_SCRATCH is only purged at the end of a job if no other job from the same user is still running on the node. This means that data stored in $L_SCRATCH (but not in $L_SCRATCH_JOB) will persist on the node until the user's last job terminates.
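    For instance, here is a minimal batch script sketch using $L_SCRATCH_JOB for fast temporary files, and saving the results before the job ends (the my_app program and its results/ output directory are placeholders):

    #!/bin/bash\n#SBATCH --time=01:00:00\n\n### work from the job-specific local scratch directory\ncd $L_SCRATCH_JOB\n\n### run the application, writing its temporary files locally\n$HOME/my_app --output=results/\n\n### copy the results to persistent storage before the job ends,\n### as $L_SCRATCH_JOB is purged when the job terminates\ncp -r results/ $SCRATCH/\n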

    "},{"location":"docs/storage/filesystems/#oak","title":"$OAK","text":"

    Summary

    $OAK is Stanford Research Computing's research data storage offering. It provides an affordable, longer-term storage option for labs and researchers, and is ideally suited to host large datasets, or curated, post-processed results from job campaigns, as well as final results used for publication.

    Order $OAK

    Oak storage can be easily ordered online using the Oak Storage Service page.

    $OAK is opt-in and is available as an option on Sherlock, meaning that only members of groups which have purchased storage on Oak can access this filesystem.

    For complete details and characteristics, including pricing, please refer to the Oak Storage Service page.

    | Characteristics | |
    | --------------- | --- |
    | Type | parallel, capacitive Lustre filesystem |
    | Quota | amount purchased (in 10 TB increments) |
    | Snapshots | NO |
    | Backups | optional cloud backup available (please contact us for details) |
    | Purge policy | not purged |
    | Scope | all login and compute nodes (also available through gateways outside of Sherlock) |
    "},{"location":"docs/storage/filesystems/#recommended-usage_5","title":"Recommended usage","text":"

    $OAK is ideally suited for large shared datasets, archival data and curated, post-processed results from job campaigns, as well as final results used for publication.

    Although jobs can directly read and write to $OAK during execution, it is recommended to first stage files from $OAK to $SCRATCH at the beginning of a series of jobs, and save the desired results back from $SCRATCH to $OAK at the end of the job campaign.
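    Here is a sketch of that staging pattern in a batch script (the my_dataset, my_app and results names are placeholders):

    #!/bin/bash\n#SBATCH --time=08:00:00\n\n### stage input data from $OAK to $SCRATCH at the beginning of the job\nrsync -a $OAK/datasets/my_dataset/ $SCRATCH/my_dataset/\n\n### run against the copy in $SCRATCH\ncd $SCRATCH\n$HOME/my_app my_dataset/ --output=results/\n\n### save the curated results back to $OAK at the end of the campaign\nrsync -a $SCRATCH/results/ $OAK/results/\n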

    We strongly recommend using $OAK to reference your Oak directory in scripts, rather than its explicit path.

    $OAK is not backed up

    $OAK is not backed up or replicated, by design, and deleted files cannot be recovered. We recommend that all researchers keep an additional copy of their important files (for instance, in Google Drive).

    Cloud backup option

    For additional data security, Stanford Research Computing now offers \"cloud backup\" of Oak data as a managed service option. For an additional monthly fee, data on Oak can be backed up to the cloud (researchers are responsible for cloud storage costs). Please contact us if you'd like additional information.

    "},{"location":"docs/storage/filesystems/#checking-quota-usage_4","title":"Checking quota usage","text":"

    The sh_quota tool can be used to display quota usage on $OAK

    $ sh_quota -f OAK\n

    See the Checking Quotas section for more details.

    1. Metadata are data such as a file's size, name, path, owner, permissions, etc.\u00a0\u21a9

    2. An inode (index node) is a data structure in a Unix-style file system that describes a file-system object such as a file or a directory.\u00a0\u21a9\u21a9

    "},{"location":"docs/tech/","title":"Technical specifications","text":"","tags":["tech"]},{"location":"docs/tech/#in-a-nutshell","title":"In a nutshell","text":"

    Sherlock features over 1,700 compute nodes, 55,600+ CPU cores and 700+ GPUs, for a total computing power of more than 5.4 Petaflops. That would rank it in the Top500 list of the most powerful supercomputers in the world.

    The cluster currently extends across 2 Infiniband fabrics (EDR, HDR). A 9.7 PB parallel, distributed filesystem, delivering over 200 GB/s of I/O bandwidth, provides scratch storage for more than 7,200 users and 1,100 PI groups.

    ","tags":["tech"]},{"location":"docs/tech/#resources","title":"Resources","text":"

    The Sherlock cluster was initiated in January 2014 with a base of freely available computing resources (about 2,000 CPU cores) and the accompanying networking and storage infrastructure (about 1 PB of shared storage).

    Since then, it's been constantly expanding, spawning multiple cluster generations, with numerous contributions from many research groups on campus.

    Cluster generations

    For more information about Sherlock's ongoing evolution and expansion, please see Cluster generations.

    ","tags":["tech"]},{"location":"docs/tech/#interface","title":"Interface","text":"Type Qty Details login nodes 12 sherlock.stanford.edu (load-balanced) data transfer nodes 3 dedicated bandwidth for large data transfers","tags":["tech"]},{"location":"docs/tech/#computing","title":"Computing","text":"

    Access to computing resources

    Computing resources marked free in the table below are freely available to every Sherlock user. Resources marked owners are only accessible to Sherlock owners and their research teams.

    | Type | Access | Nodes | CPU cores | Details |
    | ---- | ------ | ----- | --------- | ------- |
    | compute nodes (normal partition) | free | 195 | 5,236 | 57x 20 (Intel E5-2640v4), 128 GB RAM, EDR IB<br>40x 24 (Intel 5118), 191 GB RAM, EDR IB<br>28x 32 (AMD 7543), 256 GB RAM, HDR IB<br>70x 32 (AMD 7502), 256 GB RAM, HDR IB |
    | development nodes (dev partition) | free | 4 | 104 | 2x 20 (Intel E5-2640v4), 128 GB RAM, EDR IB<br>2x 32 (AMD 7543P), 256 GB RAM, HDR IB<br>32x Tesla A30_MIG-1g.6gb |
    | large memory nodes (bigmem partition) | free | 9 | 504 | 4x 24 (Intel 5118), 384 GB RAM, EDR IB<br>1x 32 (Intel E5-2697Av4), 512 GB RAM, EDR IB<br>1x 56 (Intel E5-4650v4), 3072 GB RAM, EDR IB<br>1x 64 (AMD 7502), 4096 GB RAM, HDR IB<br>2x 128 (AMD 7742), 1024 GB RAM, HDR IB |
    | GPU nodes (gpu partition) | free | 26 | 748 | 1x 20 (Intel E5-2640v4), 256 GB RAM, EDR IB, 4x Tesla P100 PCIe<br>1x 20 (Intel E5-2640v4), 256 GB RAM, EDR IB, 4x Tesla P40<br>3x 20 (Intel E5-2640v4), 256 GB RAM, EDR IB, 4x Tesla V100_SXM2<br>1x 24 (Intel 5118), 191 GB RAM, EDR IB, 4x Tesla V100_SXM2<br>2x 24 (Intel 5118), 191 GB RAM, EDR IB, 4x Tesla V100 PCIe<br>16x 32 (AMD 7502P), 256 GB RAM, HDR IB, 4x Geforce RTX_2080Ti<br>2x 32 (AMD 7502P), 256 GB RAM, HDR IB, 4x Tesla V100S PCIe |
    | privately-owned nodes (owners partition) | owners | 1,494 | 48,680 | 40 different node configurations, including GPU and bigmem nodes |
    | Total | | 1,732 | 55,632 | 796 GPUs |
    ","tags":["tech"]},{"location":"docs/tech/#storage","title":"Storage","text":"

    More information

    For more information about storage options on Sherlock, please refer to the Storage section of the documentation.

    Sherlock is architected around shared storage components, meaning that users can find the same files and directories from all of the Sherlock nodes.

    • Highly-available NFS filesystem for user and group home directories (with hourly snapshots and off-site replication)
    • High-performance Lustre scratch filesystem (9.7 PB parallel, distributed filesystem, delivering over 200 GB/s of I/O bandwidth)
    • Direct access to Stanford Research Computing's Oak long-term research data storage system (58.3 PB)
    ","tags":["tech"]},{"location":"docs/tech/facts/","title":"Sherlock facts","text":"

    as of May 2024

    ","tags":["tech"]},{"location":"docs/tech/facts/#users","title":"Users","text":"
    • 7,276 user accounts

    • 1,137 PI groups

      from all of Stanford's seven Schools, SLAC, Stanford Institutes, etc.

    • 202 owner groups

    ","tags":["tech"]},{"location":"docs/tech/facts/#interfaces","title":"Interfaces","text":"
    • 12 login nodes

    • 3 data transfer nodes (DTNs)

    ","tags":["tech"]},{"location":"docs/tech/facts/#computing","title":"Computing","text":"
    • 5.44 PFLOPs (FP64)

      19.76 PFLOPs (FP32)

    • 55,632 CPU cores

      4 CPU generations (13 CPU models)

    • 796 GPUs

      4 GPU generations (12 GPU models)

    ","tags":["tech"]},{"location":"docs/tech/facts/#hardware","title":"Hardware","text":"
    • 1,732 compute nodes

      19 server models (from 3 different manufacturers)

    • 37 racks

      1,188 rack units

    ","tags":["tech"]},{"location":"docs/tech/facts/#energy","title":"Energy","text":"
    • 554.03 kW

      total power usage

    • 58 PDUs

    ","tags":["tech"]},{"location":"docs/tech/facts/#storage","title":"Storage","text":"
    • 9.7 PB $SCRATCH

      parallel, distributed filesystem, delivering over 200 GB/s of I/O bandwidth

    • 58.3 PB $OAK

      long term research data storage

    ","tags":["tech"]},{"location":"docs/tech/facts/#networking","title":"Networking","text":"
    • 104 Infiniband switches

      across 2 Infiniband fabrics (EDR, HDR)

    • 5,730 Infiniband cables

      spanning about 30.12 km

    • 53 Ethernet switches

    ","tags":["tech"]},{"location":"docs/tech/facts/#scheduler","title":"Scheduler","text":"
    • 179 Slurm partitions

    • 43,261 CPU.hours/day

      over 4 years of computing in a single day

    • $2,890,546 /month

      to run the same workload on t2.large on-demand cloud instances

    ","tags":["tech"]},{"location":"docs/tech/status/","title":"Status","text":"

    Scheduled maintenances

    Maintenance operations and upgrades are scheduled on Sherlock on a regular basis. Per the University's Minimum Security policies, we deploy security patches on Sherlock as required for compliance.

    "},{"location":"docs/tech/status/#components-and-services","title":"Components and services","text":"


    For more details about Sherlock components and services, see the status dashboard.

    "},{"location":"docs/tech/status/#current-usage","title":"Current usage","text":""},{"location":"docs/user-guide/gpu/","title":"GPU nodes","text":"

    To support the latest computing advancements in many fields of science, Sherlock features a number of compute nodes with GPUs that can be used to run a variety of GPU-accelerated applications. Those nodes are available to everyone, but are a scarce, highly-demanded resource, so getting access to them may require some wait time in queue.

    Getting your own GPU nodes

    If you need frequent access to GPU nodes, we recommend considering becoming an owner on Sherlock, so you can have immediate access to your GPU nodes when you need them.

    "},{"location":"docs/user-guide/gpu/#gpu-nodes","title":"GPU nodes","text":"

    A limited number of GPU nodes are available in the gpu partition. Anybody running on Sherlock can submit a job there. As owners contribute to expand Sherlock, more GPU nodes are added to the owners partition, for use by PI groups which purchased their own compute nodes.

There is a variety of different GPU configurations available in the gpu partition. To see the available GPU types, please refer to the GPU types section.

    "},{"location":"docs/user-guide/gpu/#submitting-a-gpu-job","title":"Submitting a GPU job","text":"

    To submit a GPU job, you'll need to use the --gpus (or -G) option in your batch script or command line submission options.

    For instance, the following script will request one GPU for two hours in the gpu partition, and run the GPU-enabled version of gromacs:

    #!/bin/bash\n#SBATCH -p gpu\n#SBATCH -c 10\n#SBATCH -G 1\n\nml load gromacs/2016.3\n\nsrun gmx_gpu ...\n

    You can also directly run GPU processes on compute nodes with srun. For instance, the following command will display details about the GPUs allocated to your job:

    $ srun -p gpu --gpus 2 nvidia-smi\nFri Jul 28 12:41:49 2017\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 375.51                 Driver Version: 375.51                    |\n|-------------------------------+----------------------+----------------------+\n| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n|===============================+======================+======================|\n|   0  Tesla P40           On   | 0000:03:00.0     Off |                    0 |\n| N/A   26C    P8    10W / 250W |      0MiB / 22912MiB |      0%   E. Process |\n+-------------------------------+----------------------+----------------------+\n|   1  Tesla P40           On   | 0000:04:00.0     Off |                    0 |\n| N/A   24C    P8    10W / 250W |      0MiB / 22912MiB |      0%   E. Process |\n+-------------------------------+----------------------+----------------------+\n\n+-----------------------------------------------------------------------------+\n| Processes:                                                       GPU Memory |\n|  GPU       PID  Type  Process name                               Usage      |\n|=============================================================================|\n|  No running processes found                                                 |\n+-----------------------------------------------------------------------------+\n

    GPU resources MUST be requested explicitly

    Jobs will be rejected at submission time if they don't explicitly request GPU resources.

The gpu partition only accepts jobs that explicitly request GPU resources; those that don't will be rejected with the following message:

    $ salloc -p gpu\nsrun: error: Unable to allocate resources: Job violates accounting/QOS policy (job submit limit, user's size and/or time limits)\n
    "},{"location":"docs/user-guide/gpu/#interactive-sessions","title":"Interactive sessions","text":"

    As for any other compute node, you can submit an interactive job and request a shell on a GPU node with the following command:

    $ salloc -p gpu --gpus 1\nsalloc: job 38068928 queued and waiting for resources\nsalloc: job 38068928 has been allocated resources\n$ nvidia-smi --query-gpu=index,name --format=csv,noheader\n0, Tesla V100-SXM2-16GB\n
    "},{"location":"docs/user-guide/gpu/#instant-lightweight-gpu-instances","title":"Instant lightweight GPU instances","text":"

    Given that some tasks don't necessarily require a full-fledged, top-of-the-line GPU, lightweight GPU instances are provided to allow instant access to GPU resources for quick debugging, prototyping or testing jobs.

    Lightweight GPU instances

    Lightweight GPU instances leverage NVIDIA\u2019s Multi-Instance GPU (MIG) to provide multiple fully isolated GPU instances on the same physical GPU, each with their own high-bandwidth memory, cache, and compute cores.

    Those GPU instances are instantly available via the dev partition, and can be requested with the sh_dev command:

    # sh_dev -g 1\n[...]\n[kilian@sh03-17n15 ~] (job 17628407) $ nvidia-smi -L\nGPU 0: NVIDIA A30 (UUID: GPU-ac772b5a-123a-dc76-9480-5998f435fe84)\n  MIG 1g.6gb      Device  0: (UUID: MIG-87e5d835-8046-594a-b237-ccc770b868ef)\n

    For interactive apps in the Sherlock OnDemand interface, requesting a GPU in the dev partition will initiate an interactive session with access to a lightweight GPU instance.

    "},{"location":"docs/user-guide/gpu/#gpu-types","title":"GPU types","text":"

    Since Sherlock features many different types of GPUs, each with its own technical characteristics, performance profiles and specificities, you may want to ensure that your job runs on a specific type of GPU.

To that end, Slurm allows users to specify constraints when submitting jobs, which indicate to the scheduler that only nodes with features matching the job's constraints can be used to satisfy the request. Multiple constraints may be specified and combined with various operators (please refer to the official Slurm documentation for details).
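
For instance, a job could accept either of two GPU memory sizes by combining two features with the | (OR) operator. This is just a sketch, assuming both features exist in the target partition:

#SBATCH -C \"GPU_MEM:16GB|GPU_MEM:24GB\"\n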

The list of available features on compute nodes can be obtained with the node_feat1 command. More specifically, to list the GPU-related features of nodes in the gpu partition:

    $ node_feat -p gpu | grep GPU_\nGPU_BRD:TESLA\nGPU_GEN:PSC\nGPU_MEM:16GB\nGPU_MEM:24GB\nGPU_SKU:TESLA_P100_PCIE\nGPU_SKU:TESLA_P40\n

    You can use node_feat without any option to list all the features of all the nodes in all the partitions. But please note that node_feat will only list the features of nodes from partitions you have access to, so output may vary depending on your group membership.

The different characteristics2 of the various GPU types are listed in the following table:

Slurm feature | Description | Possible values | Example job constraint
GPU_BRD | GPU brand | GEFORCE: GeForce/TITAN, TESLA: Tesla | #SBATCH -C GPU_BRD:TESLA
GPU_GEN | GPU generation | PSC: Pascal, MXW: Maxwell | #SBATCH -C GPU_GEN:PSC
GPU_MEM | Amount of GPU memory | 16GB, 24GB | #SBATCH -C GPU_MEM:16GB
GPU_SKU | GPU model | TESLA_P100_PCIE, TESLA_P40 | #SBATCH -C GPU_SKU:TESLA_P40

    Depending on the partitions you have access to, more features may be available to be requested in your jobs.

For instance, to request a Tesla GPU for your job, you can use the following submission options:

    $ srun -p gpu -G 1 -C GPU_BRD:TESLA nvidia-smi -L\nGPU 0: Tesla P100-SXM2-16GB (UUID: GPU-4f91f58f-f3ea-d414-d4ce-faf587c5c4d4)\n

    Unsatisfiable constraints

If you specify a constraint that can't be satisfied in the partition you're submitting your job to, the job will be rejected by the scheduler. For instance, requesting an RTX 3090 GPU in the gpu partition, which doesn't feature any, will result in an error:

    $ srun -p gpu -G 1 -C GPU_SKU:RTX_3090 nvidia-smi -L\nsrun: error: Unable to allocate resources: Requested node configuration is not available\n

    For more information about requesting specific node features and adding job constraints, you can also refer to the \"Node features\" page.

    "},{"location":"docs/user-guide/gpu/#gpu-compute-modes","title":"GPU compute modes","text":"

    By default, GPUs on Sherlock are set in the Exclusive Process compute mode3, to provide the best performance and an isolated environment for jobs, out of the box.

    Some software may require GPUs to be set to a different compute mode, for instance to share a GPU across different processes within the same application.

    To handle that case, we developed a specific option, --gpu_cmode, that users can add to their srun and sbatch submission options, to choose the compute mode for the GPUs allocated to their job.

    Here's the list of the different compute modes supported on Sherlock's GPUs:

GPU compute mode | --gpu_cmode option | Description
\"Default\" | shared | Multiple contexts are allowed per device (NVIDIA default)
\"Exclusive Process\" | exclusive | Only one context is allowed per device, usable from multiple threads at a time (Sherlock default)
\"Prohibited\" | prohibited | No CUDA context can be created on the device

    By default, or if the --gpu_cmode option is not specified, GPUs will be set in the \"Exclusive Process\" mode, as demonstrated by this example command:

    $ srun -p gpu -G 1 nvidia-smi\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 387.26                 Driver Version: 387.26                    |\n|-------------------------------+----------------------+----------------------+\n| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n|===============================+======================+======================|\n|   0  Tesla P40           On   | 00000000:03:00.0 Off |                    0 |\n| N/A   22C    P8    10W / 250W |      0MiB / 22912MiB |      0%   E. Process |\n+-------------------------------+----------------------+----------------------+\n

    With the --gpu_cmode option, the scheduler will set the GPU compute mode to the desired value before execution:

    $ srun -p gpu -G 1 --gpu_cmode=shared nvidia-smi\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 387.26                 Driver Version: 387.26                    |\n|-------------------------------+----------------------+----------------------+\n| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n|===============================+======================+======================|\n|   0  Tesla P40           On   | 00000000:03:00.0 Off |                    0 |\n| N/A   22C    P8    10W / 250W |      0MiB / 22912MiB |      0%      Default |\n+-------------------------------+----------------------+----------------------+\n

    Tip

    \"Default\" is the name that the NVIDIA System Management Interface (nvidia-smi) uses to describe the mode where a GPU can be shared between different processes. It does not represent the default GPU compute mode on Sherlock, which is \"Exclusive Process\".

    "},{"location":"docs/user-guide/gpu/#advanced-options","title":"Advanced options","text":"

A number of submission options are available when submitting GPU jobs, to request specific resource mappings or task binding.

    Here are some examples to allocate a set of resources as a function of the number of requested GPUs:

    • --cpus-per-gpu: requests a number of CPUs per allocated GPU.

      For instance, the following options will allocate 2 GPUs and 4 CPUs:

      $ salloc -p gpu -G 2 --cpus-per-gpu=2\n
    • --gpus-per-node: requests a number of GPUs per node,

    • --gpus-per-task: requests a number of GPUs per spawned task,
• --mem-per-gpu: allocates (host) memory per allocated GPU (see the sketch after this list).
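
As an illustration, here's a minimal batch script header combining some of those options to request 2 tasks, each with one GPU and 8 GB of host memory. This is only a sketch: the values are arbitrary and ./my_gpu_app is a placeholder:

#!/bin/bash\n#SBATCH -p gpu\n# 2 tasks, 1 GPU per task (2 GPUs total), 8 GB of host memory per GPU\n#SBATCH -n 2\n#SBATCH --gpus-per-task=1\n#SBATCH --mem-per-gpu=8GB\n\nsrun ./my_gpu_app\n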

    Other options can help set particular GPU properties (topology, frequency...):

    • --gpu-bind: specify task/GPU binding mode.

By default, every spawned task can access every GPU allocated to the job. This option can help make sure that tasks are bound to the closest GPU, for better performance (see the example after this list).

    • --gpu-freq: specify GPU and memory frequency. For instance:

      $ srun -p test -G 1 --gpu-freq=highm1,verbose /bin/true\nGpuFreq=memory_freq:2600,graphics_freq:758\n
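
For instance, a 2-task, 2-GPU job where each task is bound to the GPU closest to it could be requested like this (a sketch; ./my_gpu_app is a placeholder):

$ srun -p gpu -n 2 --gpus-per-task=1 --gpu-bind=closest ./my_gpu_app\n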

    Those options are all available to the srun/sbatch/salloc commands, and more details about each of them can be found in the Slurm documentation.

    Conflicting options

    Given the multitude of options, it's very easy to submit a job with conflicting options. In most cases the job will be rejected.

    For instance:

    $ sbatch --gpus-per-task=1 --cpus-per-gpu=2  --cpus-per-task=1 ...\n
Here, the first two options implicitly set cpus-per-task to 2, while the third option explicitly sets cpus-per-task to 1. So the job's requirements are conflicting and can't be satisfied.
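
A consistent variant (a sketch) would simply drop the explicit --cpus-per-task option and let the first two derive it:

$ sbatch --gpus-per-task=1 --cpus-per-gpu=2 ...\n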

    "},{"location":"docs/user-guide/gpu/#environment-and-diagnostic-tools","title":"Environment and diagnostic tools","text":""},{"location":"docs/user-guide/gpu/#nvtop","title":"nvtop","text":"

    GPU usage information can be shown with the nvtop tool. nvtop is available as a module, which can be loaded like this:

    $ ml load system nvtop\n

nvtop provides an htop-like interactive view of GPU utilization. Users can monitor, estimate and fine-tune their GPU resource requests with this tool. Percent GPU and memory utilization is shown while a user's GPU code is running.

1. See node_feat -h for more details.

2. The lists of values provided in the table are non-exhaustive.

3. The list of available GPU compute modes and relevant details are available in the CUDA Toolkit Documentation.

    "},{"location":"docs/user-guide/ondemand/","title":"OnDemand","text":""},{"location":"docs/user-guide/ondemand/#introduction","title":"Introduction","text":"

The Sherlock OnDemand interface allows you to conduct your research on Sherlock through a web browser. You can manage files (create, edit and move them), submit and monitor your jobs, see their output, check the status of the job queue, run a Jupyter notebook and much more, without logging in to Sherlock the traditional way, via an SSH terminal connection.

    Quote

In neuroimaging there are a number of software pipelines that output HTML reports heavy on image files. Sherlock OnDemand allows users to check those as they appear in their $SCRATCH folder, for quick quality control, instead of having to mount remote filesystems, download data locally or move it to any other storage location. Since the data itself is already quite big and costly to move, OnDemand is extremely helpful for fast assessment.

    -- Carolina Ramirez, Williams PANLab

    "},{"location":"docs/user-guide/ondemand/#more-documentation","title":"More documentation","text":"

    Open OnDemand was created by the Ohio Supercomputer Center.

    The following documentation is specifically intended for using OnDemand on Sherlock. For more complete documentation about OnDemand in general, please see the extensive documentation for OnDemand created by OSC, including many video tutorials.

    "},{"location":"docs/user-guide/ondemand/#connecting","title":"Connecting","text":"

    Connection information

    To connect to Sherlock OnDemand, simply point your browser to https://ondemand.sherlock.stanford.edu

Sherlock OnDemand requires the same level of authentication as connecting to Sherlock over SSH. You will be prompted for your SUNet ID and password, and will go through the regular two-step authentication process.

    The Sherlock OnDemand Dashboard will then open. From there, you can use the menus across the top of the page to manage files, get a shell on Sherlock, submit jobs or open interactive applications such as Jupyter Notebooks or RStudio sessions.

    To end your Sherlock OnDemand session, click on the \"Log Out\" link at the top right of the Dashboard window and close your browser.

    "},{"location":"docs/user-guide/ondemand/#getting-a-shell","title":"Getting a shell","text":"

    You can get shell access to Sherlock by choosing Clusters > Sherlock Shell Access from the top menu in the OnDemand Dashboard.

    In the window that will open, you'll be logged in to one of Sherlock's login nodes, exactly as if you were using SSH to connect. Except you don't need to install any SSH client on your local machine, configure Kerberos or deal with your SSH client configuration to avoid endless two-factor prompts. How cool is that?

    "},{"location":"docs/user-guide/ondemand/#managing-files","title":"Managing files","text":"

    To create, edit or move files, click on the Files menu from the Dashboard page. A drop-down menu will appear, listing your most common storage locations on Sherlock: $HOME, $GROUP_HOME, $SCRATCH, $GROUP_SCRATCH, and all Oak storage you have access to, including your main $OAK1. Any rclone remotes you create on Sherlock to connect to cloud storage will appear here as well.

    Choosing one of the file spaces opens the File Explorer in a new browser tab. The files in the selected directory are listed.

    There are two sets of buttons in the File Explorer.

    • Under the three vertical dots menu next to each filename: Those buttons allow you to View, Edit, Rename, Download, or Delete a file.

    • At the top of the window, on the right side:

Button | Function
Open in Terminal | Open a terminal window on Sherlock in a new browser tab
Refresh | Refresh the list of directory contents
New File | Create a new, empty file
New Directory | Create a new sub-directory
Upload | Copy a file from your local machine to Sherlock
Download | Download selected files to your local machine
Copy/Move | Copy or move selected files (after moving to a different directory)
Delete | Delete selected files
Change directory | Change your current working directory
Copy path | Copy the current working directory path to your clipboard
Show Dotfiles | Toggle the display of dotfiles (files starting with a ., which are usually hidden)
Show Owner/Mode | Toggle the display of owner and permission settings
    "},{"location":"docs/user-guide/ondemand/#creating-and-editing-jobs","title":"Creating and editing jobs","text":"

    You can create new job scripts, edit existing scripts, and submit them to the scheduler through the Sherlock OnDemand interface.

    From the top menus in the Dashboard, choose Jobs > Job Composer. A Job Composer window will open. There are two tabs at the top: Jobs and Templates.

In the Jobs tab, you'll find a list of the jobs you've submitted through OnDemand. The Templates tab will allow you to define your own job templates.

    "},{"location":"docs/user-guide/ondemand/#creating-a-new-job-script","title":"Creating a new job script","text":"

To create a new job script, you'll need to follow the steps below.

    "},{"location":"docs/user-guide/ondemand/#select-a-template","title":"Select a template","text":"

    Go to the Jobs tab in the Jobs Composer interface. You'll find a default template there: \"Simple Sequential Job\".

    To create a new job script, click the blue New Job > From Default Template button in the upper left. You'll see a green message at the top of the page indicating: \"Job was successfully created\".

    At the right of the Jobs page, you can see the Job Details, including the location of the script and the script name (by default, main_job.sh). Under that, you will see the contents of the job script in a section named Submit Script.

    "},{"location":"docs/user-guide/ondemand/#edit-the-job-script","title":"Edit the job script","text":"

    You'll need to edit the job script, so it contains the commands and workflow that you want to submit to the scheduler.

    If you need more resources than the defaults, you must include options to change them in the job script. For more details, see the Running jobs section.

    You can edit the script in several ways:

    • click the blue Edit Files button at the top of the Jobs tab in the Jobs Composer window,
    • in the Jobs tab in the Jobs Composer window, find the Submit Script section at the bottom right. Click the blue Open Editor button.

    After you save the file, the editor window remains open, but if you return to the Jobs Composer window, you will see that the content of your script has changed.

    "},{"location":"docs/user-guide/ondemand/#edit-the-job-options","title":"Edit the job options","text":"

In the Jobs tab in the Jobs Composer window, click the blue Job Options button. The options for the selected job, such as its name, the job script to run, and the account it runs under, are displayed and can be edited. Click Save or Cancel to return to the job listing.

    "},{"location":"docs/user-guide/ondemand/#submitting-jobs","title":"Submitting jobs","text":"

To submit a job, select it in the Jobs tab in the Jobs Composer page. Click the green Submit button to submit the selected job. A message at the top of the window shows whether the job submission was successful or not. If it is not, you can edit the job script or options and resubmit. When the job is submitted successfully, the status of the job in the Jobs Composer window will change to Queued or Running. When the job completes, the status will change to Completed.

    "},{"location":"docs/user-guide/ondemand/#monitoring-jobs","title":"Monitoring jobs","text":"

From the Dashboard page, the Jobs > Active Jobs top-level menu will bring you to a live view of Sherlock's scheduler queue. You'll be able to see all the jobs currently in queue, including running and pending jobs, as well as some details about individual jobs.

At the bottom of the detailed view, you'll find two buttons that will bring you to the directory where that job's files are located, either in the File Manager or in a Shell session.

    "},{"location":"docs/user-guide/ondemand/#interactive-applications","title":"Interactive applications","text":"

    One of the main features of Sherlock OnDemand is the ability to run interactive applications directly from the web interface, without leaving your web browser.

    "},{"location":"docs/user-guide/ondemand/#jupyter-notebooks","title":"Jupyter Notebooks","text":"

    You can run Jupyter Notebooks (using Python, Julia or other languages) through Sherlock OnDemand.

    Some preliminary setup may be required

    Before running your first Jupyter Notebook with IJulia, you'll need to run the following steps (this only needs to be done once):

    $ ml julia\n$ julia\njulia> using Pkg;\njulia> Pkg.add(\"IJulia\")\n

    When you see the message that IJulia has been installed, you can end your interactive session.

    To start a Jupyter session from Sherlock OnDemand:

    1. Select Interactive Apps > Jupyter Notebook from the top menu in the Dashboard page.

    2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your notebook starts.

3. Click the blue Launch button to start your JupyterHub session. You may have to wait in the queue for resources to become available for you.

4. When your session starts, you can click on the blue Connect to Jupyter button to open your Jupyter Notebook. The Dashboard window will display information about your Jupyter session, including the name of the compute node it is running on, when it started, and how much time remains.

5. In your new Jupyter Notebook tab, you'll see 3 tabs: Files, Running and Clusters.

By default, you are in the Files tab, which displays the contents of your $HOME directory on Sherlock. You can navigate through your files there.

    Under the Running tab, you will see the list of all the notebooks or terminal sessions that you have currently running.

6. You can now start a Jupyter Notebook:

      1. To open an existing Jupyter Notebook, which is already stored on Sherlock, navigate to its location in the Files tab and click on its name. A new window running the notebook will open.
      2. To create a new Jupyter Notebook, click on the New button at the top right of the file listing, and choose the kernel of your choice from the drop down.

To terminate your Jupyter Notebook session, go back to the Dashboard, and click on My Interactive Sessions in the top menu. This will bring you to a page listing all your currently active interactive sessions. Identify the one you'd like to terminate and click on the red Cancel button.

    "},{"location":"docs/user-guide/ondemand/#jupyterlab","title":"JupyterLab","text":"

    To run JupyterLab via Sherlock OnDemand:

    1. Select Interactive Apps > JupyterLab from the top menu in the Dashboard page.

    2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

    3. Click the blue Launch button to start your JupyterLab session. You may have to wait in the queue for resources to become available.

    4. When your session starts, click the blue Connect to JupyterLab button. A new window opens with the JupyterLab interface.

    5. The first time you connect to JupyterLab via Sherlock OnDemand, you'll see 2 tabs: Files and Launcher.

    The Files tab displays the contents of your $HOME directory on Sherlock. You can navigate through your files there.

In the Launcher tab, you will have the option to create a new Jupyter Notebook or a new Console session by clicking the tile showing the kernel of your choice. You can also open the Terminal or a text editor for a variety of file types by clicking the corresponding tile.

    To create a new kernel for IJulia:

    1. In the Launcher, click the Terminal tile in the \"Other\" section.

    2. In the Terminal, run the following commands:

      $ ml julia\n$ julia\njulia> using Pkg;\njulia> Pkg.add(\"IJulia\")\n
    3. Open a new Launcher tab by clicking the + sign next to your open Terminal tab. Julia will now be listed in the \"Notebook\" and \"Console\" sections as an available kernel.

    To create a custom kernel for a virtual environment using Python 3.x:

    1. In a shell session, activate your environment and run the following:

      $ pip3 install ipykernel\n$ python3 -m ipykernel install --user --name env --display-name \"My Env\"\n

      This will create a kernel for the environment env. It will appear as My Env in the JupyterLab Launcher.

      Creating a custom kernel for a Python 2.x environment

      When working with a Python 2.x environment, use the python/pip commands instead.

2. The custom kernel will now be listed as an option in the \"Notebook\" and \"Console\" sections in the JupyterLab Launcher. To start a Jupyter Notebook using your virtual environment, click on the tile for that kernel.

      Creating a custom kernel for a conda environment

      In order to use a kernel created from a conda environment, you must unload the python and py-jupyterlab modules from your JupyterLab session. This can be done using the JupyterLab Lmod extension. To use the Lmod extension, select the bottom tab in the left side menu of your JupyterLab window. You may also need to restart the kernel for your notebook or console.

    "},{"location":"docs/user-guide/ondemand/#matlab","title":"MATLAB","text":"

    To run MATLAB via Sherlock OnDemand:

    1. Select Interactive Apps > MATLAB from the top menu in the Dashboard page.

    2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

    3. Click the blue Launch button to start your MATLAB session. You may have to wait in the queue for resources to become available.

    4. When your session starts, click the blue Connect to MATLAB button. A new window opens with the MATLAB interface.

    "},{"location":"docs/user-guide/ondemand/#rstudio","title":"RStudio","text":"

    To run RStudio via Sherlock OnDemand:

    1. Select Interactive Apps > RStudio Server from the top menu in the Dashboard page.

    2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

    3. Click the blue Launch button to start your RStudio session. You may have to wait in the queue for resources to become available.

    4. When your session starts, click the blue Connect to RStudio Server button. A new window opens with the RStudio interface.

    Installing packages in RStudio

    You may encounter errors while installing R packages within RStudio. First try installing R packages in a shell session on the Sherlock command line. See our R packages documentation for more information.

    "},{"location":"docs/user-guide/ondemand/#tensorboard","title":"TensorBoard","text":"

    To run TensorBoard via Sherlock OnDemand:

    1. Select Interactive Apps > TensorBoard from the top menu in the Dashboard page.

    2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

    3. Click the blue Launch button to start your TensorBoard session. You may have to wait in the queue for resources to become available.

    4. When your session starts, click the blue Connect to TensorBoard button. A new window opens with the TensorBoard interface.

    "},{"location":"docs/user-guide/ondemand/#vs-code","title":"VS Code","text":"

    You can use VS Code on Sherlock through the code-server interactive app.

    Using your local VS Code with remote SSH

    Connecting to Sherlock from VS Code on your local machine is not supported at this time due to a known issue with the closed-source \"Remote SSH\" extension.

    To start a VS Code session via Sherlock OnDemand:

    1. Select Interactive Apps > code-server from the top menu in the Dashboard page.

    2. In the screen that opens, specify the different parameters for your job (time limit, number of nodes, CPUs, partition to use, etc.). You can also choose to be notified by email when your session starts.

    3. Click the blue Launch button to start your code-server session. You may have to wait in the queue for resources to become available.

    4. When your session starts, click the blue Connect to code-server button. A new window opens with the code-server interface.

    "},{"location":"docs/user-guide/ondemand/#support","title":"Support","text":"

    If you are experiencing issues with Sherlock or your interactive session, you can contact us directly from Sherlock OnDemand.

    To submit a ticket about Sherlock or Sherlock OnDemand in general:

    1. Select Help -> Submit Support Ticket from the top menu in the Dashboard page.

    2. In the screen that opens, complete the Support Ticket form. When applicable, please provide:

      • the full path to any files involved in your question or problem,

      • the command(s) you ran, and/or the job submission script(s) you used,

      • the exact, entire error message (or trace) you received.

3. Click the blue Submit support ticket button. Research Computing support will respond to you as soon as we are able.

    To submit a ticket about your current or recent interactive session:

    1. Select My Interactive Sessions from the top menu in the Dashboard page.

    2. In the screen that opens, find the card for the session you need help with. Active sessions will have a green header, and past sessions will have a gray header. Click that card's Submit support ticket link to open the Support Ticket form.

    3. Complete the Support Ticket form. When applicable, please provide:

      • the full path to any files involved in your question or problem,

      • the command(s) you ran, and/or the job submission script(s) you used,

      • the exact, entire error message (or trace) you received.

4. Click the blue Submit support ticket button. Research Computing support will respond to you as soon as we are able.

1. If you have access to the Oak storage system.

    "},{"location":"docs/user-guide/running-jobs/","title":"Running jobs","text":"","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#login-nodes","title":"Login nodes","text":"

    Login nodes are not for computing

    Login nodes are shared among many users and therefore must not be used to run computationally intensive tasks. Those should be submitted to the scheduler which will dispatch them on compute nodes.

The key principle of a shared computing environment is that resources are shared among users and must be scheduled. It is mandatory to schedule work on Sherlock by submitting jobs to the scheduler, and since login nodes are a shared resource, they must not be used to execute computing tasks.

Acceptable uses of login nodes include:

    • lightweight file transfers,
    • script and configuration file editing,
    • job submission and monitoring.

    Resource limits are enforced

To minimize disruption and ensure a comfortable working environment for users, resource limits are enforced on login nodes, and processes started there will automatically be terminated if their resource usage (including CPU time, memory and run time) exceeds those limits.

    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#slurm-commands","title":"Slurm commands","text":"

    Slurm allows requesting resources and submitting jobs in a variety of ways. The main Slurm commands to submit jobs are listed in the table below:

Command | Description | Behavior
salloc | Requests resources and allocates them to a job | Starts a new shell, but does not execute anything
srun | Requests resources and runs a command on the allocated compute node(s) | Blocking command: will not return until the job ends
sbatch | Requests resources and runs a script on the allocated compute node(s) | Asynchronous command: will return as soon as the job is submitted
","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#interactive-jobs","title":"Interactive jobs","text":"","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#dedicated-nodes","title":"Dedicated nodes","text":"

    Interactive jobs allow users to log in to a compute node to run commands interactively on the command line. They could be an integral part of an interactive programming and debugging workflow. The simplest way to establish an interactive session on Sherlock is to use the sh_dev command:

    $ sh_dev\n

This will open a login shell using one core and 4 GB of memory on one node, for one hour. sh_dev sessions run on dedicated compute nodes, which ensures minimal wait times when you need to access a node for testing scripts, debugging code or any kind of interactive work.

    sh_dev also provides X11 forwarding via the submission host (typically the login node you're connected to) and can thus be used to run GUI applications.

    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#compute-nodes","title":"Compute nodes","text":"

    If you need more resources1, you can pass options to sh_dev, to request more CPU cores, more nodes, or even run in a different partition. sh_dev -h will provide more information:

    $ sh_dev -h\nsh_dev: start an interactive shell on a compute node.\n\nUsage: sh_dev [OPTIONS]\n    Optional arguments:\n        -c      number of CPU cores to request (OpenMP/pthreads, default: 1)\n        -n      number of tasks to request (MPI ranks, default: 1)\n        -N      number of nodes to request (default: 1)\n        -m      memory amount to request (default: 4GB)\n        -p      partition to run the job in (default: dev)\n        -t      time limit (default: 01:00:00)\n        -r      allocate resources from the named reservation (default: none)\n        -J      job name (default: sh_dev)\n        -q      quality of service to request for the job (default: normal)\n\n    Note: the default partition only allows for limited amount of resources.\n    If you need more, your job will be rejected unless you specify an\n    alternative partition with -p.\n
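
For example, a larger session could be requested like this (a sketch; the resource amounts are arbitrary, and the dev partition's limits require specifying an alternative partition here):

$ sh_dev -c 4 -m 16GB -t 02:00:00 -p normal\n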

    Another way to get an interactive session on a compute node is to use srun to execute a shell through the scheduler. For instance, to start a bash session on a compute node, with the default resource requirements (one core for 2 hours), you can run:

    $ srun --pty bash\n

    The main advantage of this approach is that it will allow you to specify the whole range of submission options that sh_dev may not support.

    Finally, if you prefer to submit an existing job script or other executable as an interactive job, you can use the salloc command:

    $ salloc script.sh\n

    If you don't provide a command to execute, salloc will start a Slurm job and allocate resources for it, but it will not automatically connect you to the allocated node(s). It will only start a new shell on the same node you launched salloc from, and set up the appropriate $SLURM_* environment variables. So you will typically need to look at them to see what nodes have been assigned to your job. For instance:

    $ salloc\nsalloc: Granted job allocation 655914\n$ echo $SLURM_NODELIST\nsh02-01n55\n$ ssh sh02-01n55\n[...]\nsh02-01n55 ~ $\n
    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#connecting-to-nodes","title":"Connecting to nodes","text":"

    Login to compute nodes

    Users are not allowed to login to compute nodes unless they have a job running there.

    If you SSH to a compute node without any active job allocation, you'll be greeted by the following message:

    $ ssh sh02-01n01\nAccess denied by pam_slurm_adopt: you have no active jobs on this node\nConnection closed\n$\n

Once you have a job running on a node, you can SSH directly to it and run additional processes2, or observe how your application behaves, debug issues, and so on.

    The salloc command supports the same parameters as sbatch, and can override any default configuration. Note that any #SBATCH directive in your job script will not be interpreted by salloc when it is executed in this way. You must specify all arguments directly on the command line for them to be taken into account.
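
For instance, an interactive allocation with 4 CPU cores and 16 GB of memory for 2 hours could be requested like this (a sketch; the values are arbitrary):

$ salloc -c 4 --mem=16GB --time=02:00:00\n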

    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#batch-jobs","title":"Batch jobs","text":"

It's easy to schedule batch jobs on Sherlock. A job is simply an instance of your program, for example your R, Python or Matlab script, that is submitted to and executed by the scheduler (Slurm). When you submit a job with the sbatch command, it's called a batch job, and it will either run immediately or pend (wait) in the queue.

The length of time a job will pend is determined by several factors: how many other jobs are in the queue ahead of your job, and how many resources your job is requesting, are the most important ones. One key principle when requesting resources is to always request as few as you need to get your job done; this ensures your job pends in the queue for as little time as necessary. To get a rough idea of what resources are needed, you can profile your code/jobs in an sh_dev session in real time with htop, nvtop, sacct, etc. The basic concept is to tell the scheduler what resources your job needs and how long it should run. These resources are:

CPUs: How many CPUs the program you are calling in the sbatch script needs. Unless it can utilize multiple CPUs at once, you should request a single CPU. Check your code's documentation, or try running it in an interactive session with sh_dev and watch htop, if you are unsure.

GPUs: If your code is GPU-enabled, how many GPUs does it need? Use the diagnostic tool nvtop to see if your code is capable of running on multiple GPUs, and how much GPU memory it's using in real time.

memory (RAM): How much memory your job will consume. Some things to consider: will it load a large file or matrix into memory? Does it consume a lot of memory on your laptop? The default memory amount is often sufficient for many jobs.

    time: How long will it take for your code to run to completion?

partition: What set of compute nodes on Sherlock will you run on: normal, gpu, owners, bigmem? Use the sh_part command to see what partitions you are allowed to run on. The default partition on Sherlock is the normal partition.

Next, you tell the scheduler what your job should do: load modules and run your code. Note that any logic you can code into a bash script can also be coded into an sbatch script.

This example job will run the Python script mycode.py for 10 minutes on the normal partition, using 1 CPU and 8 GB of memory. To aid in debugging, we name this job \"test_job\" and append the job ID (%j) to the two output files that Slurm creates when the job runs. The output files are written to the directory from which you launched your job; you can also specify a different path. One file will contain any errors and the other will contain non-error output: look in these two files, ending in .err and .out, for useful debugging information and error output.

    Because it's a Python 3 script that uses some Numpy code, we need to load the python/3.6.1 and the py-numpy/1.19.2_py36 modules. The Python script is then called just as you would on the command line at the end of the sbatch script:

    sbatch script:

    #!/usr/bin/bash\n#SBATCH --job-name=test_job\n#SBATCH --output=test_job.%j.out\n#SBATCH --error=test_job.%j.err\n#SBATCH --time=10:00\n#SBATCH -p normal\n#SBATCH -c 1\n#SBATCH --mem=8GB\nmodule load python/3.6.1\nmodule load py-numpy/1.19.2_py36\npython3 mycode.py\n
Create and edit the sbatch script with a text editor like vim/nano, or the OnDemand file manager, then save the file. In this example, we call it \"test.sbatch\".

    Submit to the scheduler with the sbatch command:

$ sbatch test.sbatch\n
    Monitor your job and job ID in the queue with the squeue command:

$ squeue -u $USER\n   JOBID     PARTITION     NAME     USER    ST       TIME  NODES  NODELIST(REASON)\n   44915821    normal    test_job  <userID>  PD       0:00      1 (Priority)\n

Notice that the job's state (ST) is pending (PD).

Once the job starts to run, it will change to R:

$ squeue -u $USER\n    JOBID     PARTITION     NAME     USER     ST      TIME  NODES   NODELIST(REASON)\n    44915854    normal test_job  <userID>     R      0:10     1     sh02-01n49\n

Here you can see it has been running (R) on the compute node sh02-01n49 for 10 seconds. While your job is running, you have SSH access to that node and can run diagnostic tools such as htop and nvtop to monitor your job's memory and CPU/GPU utilization in real time. You can also manage the job based on the JobID assigned to it (44915854). For example, the job can be cancelled with the scancel command.
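
For instance, using the job ID from the example above:

$ scancel 44915854\n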

    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#resource-requests","title":"Resource requests","text":"

    To get a better idea of the amount of resources your job will need, you can use the ruse command, available as a module:

    $ module load system ruse\n

    ruse is a command line tool developed by Jan Moren to measure a process' resource usage. It periodically measures the resource use of a process and its subprocesses, and can help you find out how much resource to allocate to your job. It will determine the actual memory, execution time and cores that individual programs or MPI applications need to request in their job submission options.

ruse periodically samples the process and its subprocesses and keeps track of the CPU, time and maximum memory use. It also optionally records the sampled values over time. The purpose of ruse is not to profile processes in detail, but to follow jobs that run for many minutes, hours or days, with no performance impact and without changing the measured application in any way.

    You'll find complete documentation and details about ruse's usage on the project webpage, but here are a few useful examples.

    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#sizing-a-job","title":"Sizing a job","text":"

    In its simplest form, ruse can help discover how much resources a new script or application will need. For instance, you can start a sizing session on a compute node with an overestimated amount of resources, and start your application like this:

    $ ruse ./myapp\n

    This will generate a <myapp>-<pid>/ruse output file in the current directory, looking like this:

    Time:           02:55:47\nMemory:         7.4 GB\nCores:          4\nTotal_procs:    3\nActive_procs:   2\nProc(%): 99.9  99.9\n

    It shows that myapp:

    • ran for almost 3 hours
• used a little less than 8 GB of memory,
    • had 4 cores available,
    • spawned 3 processes, among which at most 2 were active at the same time,
• and that both active processes each used 99.9% of a CPU core.

    This information could be useful in tailoring the job resource requirements to its exact needs, making sure that the job won't be killed for exceeding one of its resource limits, and that the job won't have to wait too long in queue for resources that it won't use. The corresponding job request could look like this:

    #SBATCH --time 3:00:00\n#SBATCH --mem 8GB\n#SBATCH --cpus-per-task 2\n
    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#verifying-a-jobs-usage","title":"Verifying a job's usage","text":"

It's also important to verify that applications, especially parallel ones, stay within the confines of the resources they've requested. For instance, a number of parallel computing libraries assume they can use all the resources on the host: they will automatically detect the number of physical CPU cores present on the compute node, and start as many processes. This could be a significant issue if the job requested fewer CPUs, as more processes will be constrained on fewer CPU cores, which will result in node overload and degraded performance for the application.

    To avoid this, you can start your application with ruse and report usage for each time step specified with -t. You can also request the reports to be displayed directly on stdout rather than stored in a file.

    For instance, this will report usage every 10 seconds:

    $ ruse -s -t10 --stdout ./myapp\n   time         mem   processes  process usage\n  (secs)        (MB)  tot  actv  (sorted, %CPU)\n     10        57.5    17    16   33  33  33  25  25  25  25  25  25  25  25  20  20  20  20  20\n     20        57.5    17    16   33  33  33  25  25  25  25  25  25  25  25  20  20  20  20  20\n     30        57.5    17    16   33  33  33  25  25  25  25  25  25  25  25  20  20  20  20  20\n\nTime:           00:00:30\nMemory:         57.5 MB\nCores:          4\nTotal_procs:   17\nActive_procs:  16\nProc(%): 33.3  33.3  33.2  25.0  25.0  25.0  25.0  25.0  25.0  24.9  24.9  20.0  20.0  20.0  20.0  19.9\n

Here, we can see that despite having been allocated 4 CPUs, the application started 17 threads, 16 of which were actively running intensive computations, with the unfortunate consequence that each process could only use a fraction of a CPU.

    In that case, to ensure optimal performance and system operation, it's important to modify the application parameters to make sure that it doesn't start more computing processes than the number of requested CPU cores.
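
For OpenMP-based applications, for instance, this can usually be achieved by deriving the thread count from the Slurm allocation in the batch script. This is a sketch: $SLURM_CPUS_PER_TASK is set when --cpus-per-task is requested, ./myapp is a placeholder, and other threading libraries have their own equivalent settings:

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK   # match the thread count to the allocation\nsrun ./myapp\n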

    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#available-resources","title":"Available resources","text":"

Whether you are submitting a batch job or an interactive job, it's important to know what resources are available to you. For this reason, we provide sh_part, a command-line tool to help answer questions such as:

    • which partitions do I have access to?
    • how many jobs are running on them?
    • how many CPUs can I use?
    • where should I submit my jobs?

    sh_part can be executed on any login or compute node to see what partitions are available to you, and its output looks like this:

    $ sh_part\n     QUEUE STA   FREE  TOTAL   FREE  TOTAL RESORC  OTHER MAXJOBTIME    CORES       NODE   GRES\n PARTITION TUS  CORES  CORES  NODES  NODES PENDNG PENDNG  DAY-HR:MN    /NODE     MEM-GB (COUNT)\n    normal   *    153   1792      0     84    23k    127    7-00:00    20-24    128-191 -\n    bigmem         29     88      0      2      0      8    1-00:00    32-56   512-3072 -\n       dev         31     40      0      2      0      0    0-02:00       20        128 -\n       gpu         47    172      0      8    116      1    7-00:00    20-24    191-256 gpu:4(S:0-1)(2),gpu:4(S:0)(6)\n

    The above example shows four possible partitions where jobs can be submitted: normal, bigmem, dev, or gpu. It also provides additional information such as the maximum amount of time allowed in each partition (MAXJOBTIME), the number of other jobs already in queue, along with the ranges of memory available on nodes in each partition.

    • in the QUEUE PARTITION column, the * character indicates the default partition.
    • the RESOURCE PENDING column shows the core count of pending jobs that are waiting on resources,
    • the OTHER PENDING column lists core counts for jobs that are pending for other reasons, such as licenses, user, group or any other limit,
    • the GRES column shows the number and type of Generic RESsources available in that partition (typically, GPUs), which CPU socket they're available from, and the number of nodes that feature that specific GRES combination. So for instance, in the output above, gpu:4(S:0-1)(2) means that the gpu partition features 2 nodes with 4 GPUs each, and that those GPUs are accessible from both CPU sockets (S:0-1).
    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#recurring-jobs","title":"Recurring jobs","text":"

    Warning

    Cron tasks are not supported on Sherlock.

    Users are not allowed to create cron jobs on Sherlock, for a variety of reasons:

• resource limits cannot be easily enforced in cron jobs, meaning that a single user can end up monopolizing all the resources of a login node,
    • no amount of resources can be guaranteed when executing a cron job, leading to unreliable runtime and performance,
    • user cron jobs have the potential of bringing down whole nodes by creating fork bombs, if they're not carefully crafted and tested,
    • compute and login nodes could be redeployed at any time, meaning that cron jobs scheduled there could go away without the user being notified, and cause all sorts of unexpected results,
    • cron jobs could be mistakenly scheduled on several nodes and run multiple times, which could result in corrupted files.

    As an alternative, if you need to run recurring tasks at regular intervals, we recommend the following approach: by using the --begin job submission option, and creating a job that resubmits itself once it's done, you can virtually emulate the behavior and benefits of a cron job, without its disadvantages: your task will be scheduled on a compute node, and use all of the resources it requested, without being impacted by anything else.

Depending on your recurring job's specifics, where you submit it, and the state of the cluster at the time of execution, the starting time of that task is not guaranteed, and execution may be delayed, as it will be scheduled by Slurm like any other job. Typical recurring jobs, such as file synchronization, database updates or backup tasks, don't require strict starting times, though, so most users find this an acceptable trade-off.

The table below summarizes the advantages and disadvantages of each approach:

 | Cron tasks | Recurring jobs
Authorized on Sherlock | no | yes
Dedicated resources for the task | no | yes
Persistent across node redeployments | no | yes
Unique, controlled execution | no | yes
Precise schedule | yes | no
","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#recurrent-job-example","title":"Recurrent job example","text":"

    The script below presents an example of such a recurrent job, that would emulate a cron task. It will append a timestamped line to a cron.log file in your $HOME directory and run every 7 days.

    cron.sbatch
    #!/bin/bash\n#SBATCH --job-name=cron\n#SBATCH --begin=now+7days\n#SBATCH --dependency=singleton\n#SBATCH --time=00:02:00\n#SBATCH --mail-type=FAIL\n\n\n## Insert the command to run below. Here, we're just storing the date in a\n## cron.log file\ndate -R >> $HOME/cron.log\n\n## Resubmit the job for the next execution\nsbatch $0\n

If the job payload (here, the date command) fails for some reason and generates an error, the job will not be resubmitted, and the user will be notified by email.

We encourage users to get familiar with the submission options used in this script by taking a look at the sbatch man page, but some details are given below:

Submission option or command | Explanation
--job-name=cron | makes it easy to identify the job, is used by the --dependency=singleton option to identify identical jobs, and will allow cancelling the job by name (because its jobid will change each time it's submitted)
--begin=now+7days | will instruct the scheduler to not even consider the job for scheduling before 7 days after it's been submitted
--dependency=singleton | will make sure that only one cron job runs at any given time
--time=00:02:00 | runtime limit for the job (here 2 minutes); you'll need to adjust the value depending on the task you need to run (shorter runtime requests usually result in the job running closer to the clock mark)
--mail-type=FAIL | will send an email notification to the user if the job ever fails
sbatch $0 | will resubmit the job script by calling its own name ($0) after successful execution

    You can save the script as cron.sbatch or any other name, and submit it with:

    $ sbatch cron.sbatch\n

    It will start running for the first time 7 days after you submit it, and it will continue to run until you cancel it with the following command (using the job name, as defined by the --job-name option):

    $ scancel -n cron\n
    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#persistent-jobs","title":"Persistent jobs","text":"

    Recurring jobs described above are a good way to emulate cron jobs on Sherlock, but don't fit all needs, especially when a persistent service is required.

    For instance, workflows that require a persistent database connection would benefit from an ever-running database server instance. We don't provide persistent database services on Sherlock, but instructions and examples on how to submit database server jobs are provided for MariaDB or PostgreSQL.

    In case those database instances need to run pretty much continuously (within the limits of available resources and runtime maximums), the previous approach described in the recurring jobs section could fall a bit short. Recurring jobs are mainly designed for jobs that have a fixed execution time and don't reach their time limit, but need to run at given intervals (like synchronization or backup jobs, for instance).

    Because a database server process will never end within the job, and will continue until the job reaches its time limit, the last resubmission command (sbatch $0) will actually never be executed, and the job won't be resubmitted.

    To work around this, a possible approach is to catch a specific signal sent by the scheduler at a predefined time, before the time limit is reached, and then re-queue the job. This is easily done with the Bash trap command, which can be instructed to re-submit a job when it receives the SIGUSR1 signal.

    Automatically resubmitting a job doesn't make it immediately runnable

Jobs that are automatically re-submitted using this technique won't restart right away: they will get back in the queue and stay pending until their execution conditions (priority, resources, usage limits...) are satisfied.

    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#persistent-job-example","title":"Persistent job example","text":"

    Here's the recurring job example from above, modified to:

    1. instruct the scheduler to send a SIGUSR1 signal to the job 90 seconds³ before reaching its time limit (with the #SBATCH --signal option),
    2. re-submit itself upon receiving that SIGUSR1 signal (with the trap command).
    persistent.sbatch
    #!/bin/bash\n#\n#SBATCH --job-name=persistent\n#SBATCH --dependency=singleton\n#SBATCH --time=00:05:00\n#SBATCH --signal=B:SIGUSR1@90\n\n# catch the SIGUSR1 signal\n_resubmit() {\n    ## Resubmit the job for the next execution\n    echo \"$(date): job $SLURM_JOBID received SIGUSR1, re-submitting\"\n    sbatch $0\n}\ntrap _resubmit SIGUSR1\n\n## Insert the command to run below. Here, we're just outputting the date every\n## 60 seconds, forever\n\necho \"$(date): job $SLURM_JOBID starting on $SLURM_NODELIST\"\nwhile true; do\n    echo \"$(date): normal execution\"\n    sleep 60\ndone\n

    Long running processes need to run in the background

    If your job's actual payload (the application or command you want to run) is running continuously for the whole duration of the job, it needs to be executed in the background, so the trap can be processed.

    To run your application in the background, just add a & at the end of the command, and add a wait statement at the end of the script, so the shell waits for the background process until the end of the job.

    For instance, if you were to run a PostgreSQL database server, the while true ... done loop in the previous example could be replaced by something like this:

    postgres -i -D $DB_DIR &\nwait\n
    ","tags":["slurm"]},{"location":"docs/user-guide/running-jobs/#persistent-jobid","title":"Persistent $JOBID","text":"

    One potential issue with having a persistent job re-submit itself when it reaches its runtime limit is that it will get a different $JOBID each time it's (re-)submitted.

    This could be particularly challenging when other jobs depend on it, like in the database server scenario, where client jobs would need to start only if the database server is running. This can be achieved with job dependencies, but those dependencies have to be expressed using job ids, so a server job id that changes with each re-submission would be difficult to handle.

    To avoid this, the re-submission command (sbatch $0) can be replaced by a re-queuing command:

    scontrol requeue $SLURM_JOBID\n

    The benefit of that change is that the job will keep the same $JOBID across all re-submissions: dependencies can then be added to other jobs using that specific $JOBID, without having to worry about it changing, and there will be only one $JOBID to track for that database server job.
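
    For instance, using the job id from the sample output below (client.sbatch is a placeholder name), a client job could be held until the server job has started:

    $ sbatch --dependency=after:31182239 client.sbatch\n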

    The previous example can then be modified as follows:

    persistent.sbatch
    #!/bin/bash\n#SBATCH --job-name=persistent\n#SBATCH --dependency=singleton\n#SBATCH --time=00:05:00\n#SBATCH --signal=B:SIGUSR1@90\n\n# catch the SIGUSR1 signal\n_requeue() {\n    echo \"$(date): job $SLURM_JOBID received SIGUSR1, re-queueing\"\n    scontrol requeue $SLURM_JOBID\n}\ntrap '_requeue' SIGUSR1\n\n## Insert the command to run below. Here, we're just outputting the date every\n## 60 seconds, forever\n\necho \"$(date): job $SLURM_JOBID starting on $SLURM_NODELIST\"\nwhile true; do\n    echo \"$(date): normal execution\"\n    sleep 60\ndone\n

    Submitting that job will produce an output similar to this:

    Mon Nov  5 10:30:59 PST 2018: Job 31182239 starting on sh-06-34\nMon Nov  5 10:30:59 PST 2018: normal execution\nMon Nov  5 10:31:59 PST 2018: normal execution\nMon Nov  5 10:32:59 PST 2018: normal execution\nMon Nov  5 10:33:59 PST 2018: normal execution\nMon Nov  5 10:34:59 PST 2018: Job 31182239 received SIGUSR1, re-queueing\nslurmstepd: error: *** JOB 31182239 ON sh-06-34 CANCELLED AT 2018-11-05T10:35:06 DUE TO JOB REQUEUE ***\nMon Nov  5 10:38:11 PST 2018: Job 31182239 starting on sh-06-34\nMon Nov  5 10:38:11 PST 2018: normal execution\nMon Nov  5 10:39:11 PST 2018: normal execution\n

    The job runs for 5 minutes, then receives the SIGUSR1 signal, is re-queued, runs for another 5 minutes, and so on, until it's explicitly cancelled with scancel.
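
    As with the recurring job example, the persistent job can be cancelled by name, as defined by the --job-name option:

    $ scancel -n persistent\n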

    1. The dedicated partition that sh_dev uses by default only allows up to 2 cores and 8 GB of memory per user at any given time. So if you need more resources for your interactive session, you may have to specify a different partition.\u00a0\u21a9

    2. Please note that your SSH session will be attached to your running job, and that resources used by that interactive shell will count towards your job's resource limits. So if you start a process using large amounts of memory via SSH while your job is running, you may hit the job's memory limits, which will trigger its termination.\u00a0\u21a9

    3. Due to the resolution of event handling by the scheduler, the signal may be sent up to 60 seconds earlier than specified.\u00a0\u21a9

    ","tags":["slurm"]},{"location":"docs/user-guide/troubleshoot/","title":"Troubleshooting","text":"

    Sherlock is a resource for research, and as such, it is in perpetual evolution, as hardware, applications, libraries, and modules are added, updated, and/or modified on a regular basis. Sometimes issues can appear where none existed before. When you find something missing or a behavior that seems odd, please let us know.

    "},{"location":"docs/user-guide/troubleshoot/#how-to-submit-a-support-request","title":"How to submit a support request","text":"

    Google it first!

    When encountering issues with software, if the misbehavior involves an error message, the first step should always be to look up the error message online. There's a good chance somebody has stumbled upon the same hurdles before, and may even have provided a fix or workaround.

    One of the most helpful Google searches is your_application sbatch. For example, if you're having trouble submitting jobs or allocating resources (CPUs, time, memory) with Cell Ranger, search for cell ranger sbatch to see how others have successfully run that application on a cluster.

    If you're facing issues you can't figure out, we're here to help. Feel free to email us at srcc-support@stanford.edu, but please keep the following points in mind to ensure a timely and relevant response to your support requests.

    Please provide relevant information

    We need to understand the issue you're facing, and in most cases, we need to be able to reproduce it so that it can be diagnosed and addressed. Please make sure to provide enough information so that we can help you in the best possible way.

    This typically involves providing the following information:

    • your SUNet ID,
    • some context about your problem (were you submitting a job, copying a file, compiling an application?),
    • if relevant, the full path to the files involved in your question or problem,
    • the name of the node where you received the error (usually displayed in your command-line prompt),
    • the command(s) you ran, and/or the job submission script(s) you used,
    • the relevant job ID(s) (the example after this list shows one way to retrieve them),
    • the exact, entire error message (or trace) you received.
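
    For instance, if you need to retrieve the ids, names, nodes and exit codes of your recent jobs, something like the following sacct command can help (a sketch using standard Slurm options; adjust the start date as needed):

    $ sacct -X --starttime=$(date -d '2 days ago' +%F) --format=JobID,JobName,NodeList,State,ExitCode\n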

    Error messages are critical

    This is very important: without proper error messages, there is nothing we can do to help, and \"it doesn't work\" is not a proper error message. Also, please copy and paste the actual text of the commands, output, and error messages in your tickets, rather than attaching screenshots. That way, it is much easier for us to try to replicate your errors.

    Providing all this information from the start will avoid a back-and-forth of emails asking for the relevant details, which only delays resolution, and will help us get to your problem right away.

    "}]} \ No newline at end of file diff --git a/shell/index.html b/shell/index.html new file mode 100644 index 000000000..a7d419316 --- /dev/null +++ b/shell/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 000000000..b182ddec8 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,228 @@ + + + + https://www.sherlock.stanford.edu/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/concepts/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/credits/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/glossary/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/orders/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/tags/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/advanced-topics/connection/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/advanced-topics/job-management/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/advanced-topics/node-features/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/getting-started/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/getting-started/connecting/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/getting-started/submitting/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/install/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/list/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/modules/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/containers/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/containers/apptainer/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/containers/singularity/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/R/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/anaconda/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/clustershell/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/julia/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/mariadb/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/matlab/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/perl/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/postgresql/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/python/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/quantum-espresso/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/rclone/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/schrodinger/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/software/using/spark/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/storage/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/storage/data-protection/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/storage/data-sharing/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/storage/data-transfer/ 
+ 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/storage/filesystems/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/tech/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/tech/facts/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/tech/status/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/user-guide/gpu/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/user-guide/ondemand/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/user-guide/running-jobs/ + 2024-05-20 + daily + + + https://www.sherlock.stanford.edu/docs/user-guide/troubleshoot/ + 2024-05-20 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 000000000..cf65911b6 Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/tags.json b/tags.json new file mode 100644 index 000000000..23b79fd33 --- /dev/null +++ b/tags.json @@ -0,0 +1 @@ +{"mappings": [{"item": {"url": "docs/advanced-topics/connection/", "title": "Connection options"}, "tags": ["connection"]}, {"item": {"url": "docs/advanced-topics/job-management/", "title": "Job management"}, "tags": ["slurm"]}, {"item": {"url": "docs/advanced-topics/node-features/", "title": "Node features"}, "tags": ["advanced", "slurm"]}, {"item": {"url": "docs/getting-started/connecting/", "title": "Connecting"}, "tags": ["connection"]}, {"item": {"url": "docs/getting-started/submitting/", "title": "Submitting jobs"}, "tags": ["slurm"]}, {"item": {"url": "docs/storage/data-transfer/", "title": "Data transfer"}, "tags": ["connection"]}, {"item": {"url": "docs/tech/", "title": "Technical specifications"}, "tags": ["tech"]}, {"item": {"url": "docs/tech/facts/", "title": "Facts"}, "tags": ["tech"]}, {"item": {"url": "docs/user-guide/running-jobs/", "title": "Running jobs"}, "tags": ["slurm"]}]} \ No newline at end of file