diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/404.html b/404.html
new file mode 100644
index 00000000..f5adab5b
--- /dev/null
+++ b/404.html
@@ -0,0 +1,2012 @@
+[generated page head for "Redbrick Docs": meta tags, title, stylesheet/script/font links]
+[... generated theme markup: page header, search form, navigation ...]
+404 - Not found
+[... generated theme markup: footer, scripts ...]
\ No newline at end of file
diff --git a/CNAME b/CNAME
new file mode 100644
index 00000000..066e85fe
--- /dev/null
+++ b/CNAME
@@ -0,0 +1 @@
+docs.redbrick.dcu.ie
\ No newline at end of file
diff --git a/aperture/about/index.html b/aperture/about/index.html
new file mode 100644
index 00000000..4e214b20
--- /dev/null
+++ b/aperture/about/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/ansible/index.html b/aperture/ansible/index.html
new file mode 100644
index 00000000..bbdc4c83
--- /dev/null
+++ b/aperture/ansible/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/bastion-vm/index.html b/aperture/bastion-vm/index.html
new file mode 100644
index 00000000..b791bbe2
--- /dev/null
+++ b/aperture/bastion-vm/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/chell/index.html b/aperture/chell/index.html
new file mode 100644
index 00000000..24797a3e
--- /dev/null
+++ b/aperture/chell/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/firewall/index.html b/aperture/firewall/index.html
new file mode 100644
index 00000000..a35d072f
--- /dev/null
+++ b/aperture/firewall/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/glados/index.html b/aperture/glados/index.html
new file mode 100644
index 00000000..39e97461
--- /dev/null
+++ b/aperture/glados/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/icecast/index.html b/aperture/icecast/index.html
new file mode 100644
index 00000000..1c89ff81
--- /dev/null
+++ b/aperture/icecast/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/images/index.html b/aperture/images/index.html
new file mode 100644
index 00000000..a4082535
--- /dev/null
+++ b/aperture/images/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/index.html b/aperture/index.html
new file mode 100644
index 00000000..d83b259c
--- /dev/null
+++ b/aperture/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/johnson/index.html b/aperture/johnson/index.html
new file mode 100644
index 00000000..22d0b644
--- /dev/null
+++ b/aperture/johnson/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/nomad/index.html b/aperture/nomad/index.html
new file mode 100644
index 00000000..0a0038f1
--- /dev/null
+++ b/aperture/nomad/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/vpn/index.html b/aperture/vpn/index.html
new file mode 100644
index 00000000..0aad018f
--- /dev/null
+++ b/aperture/vpn/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
diff --git a/aperture/wheatley/index.html b/aperture/wheatley/index.html
new file mode 100644
index 00000000..9f3fa5c1
--- /dev/null
+++ b/aperture/wheatley/index.html
@@ -0,0 +1,15 @@
+[generated 15-line "Redirecting..." stub]
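The aperture/* pages above appear to be redirect stubs pointing at the pages' new locations, but their markup is collapsed in this rendering of the diff. Below is a minimal sketch of what such a 15-line "Redirecting..." stub typically contains; the target path used here (../../hardware/aperture/about/) is an assumption inferred from the hardware/aperture/* social-card images added later in this diff, not taken from the stub itself.

```html
<!-- Sketch of one generated redirect stub (e.g. aperture/about/index.html).
     The target URL is assumed, inferred from the hardware/aperture/* paths
     that appear later in this diff. -->
<!DOCTYPE html>
<html>
<head>
  <meta charset="utf-8">
  <title>Redirecting...</title>
  <link rel="canonical" href="../../hardware/aperture/about/">
  <meta name="robots" content="noindex">
  <meta http-equiv="refresh" content="0; url=../../hardware/aperture/about/">
</head>
<body>
  Redirecting...
  <script>location.replace("../../hardware/aperture/about/")</script>
</body>
</html>
```

The .nojekyll and CNAME files added above are the usual GitHub Pages companions for a build like this: the former disables Jekyll processing of the generated output, and the latter tells Pages to serve the site at docs.redbrick.dcu.ie.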
+ + diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 00000000..1cf13b9f Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/images/social/contact.png b/assets/images/social/contact.png new file mode 100644 index 00000000..358b33db Binary files /dev/null and b/assets/images/social/contact.png differ diff --git a/assets/images/social/hardware/aperture/about.png b/assets/images/social/hardware/aperture/about.png new file mode 100644 index 00000000..146004a6 Binary files /dev/null and b/assets/images/social/hardware/aperture/about.png differ diff --git a/assets/images/social/hardware/aperture/chell.png b/assets/images/social/hardware/aperture/chell.png new file mode 100644 index 00000000..cdfe7e00 Binary files /dev/null and b/assets/images/social/hardware/aperture/chell.png differ diff --git a/assets/images/social/hardware/aperture/glados.png b/assets/images/social/hardware/aperture/glados.png new file mode 100644 index 00000000..a6c48270 Binary files /dev/null and b/assets/images/social/hardware/aperture/glados.png differ diff --git a/assets/images/social/hardware/aperture/images.png b/assets/images/social/hardware/aperture/images.png new file mode 100644 index 00000000..e8f12b25 Binary files /dev/null and b/assets/images/social/hardware/aperture/images.png differ diff --git a/assets/images/social/hardware/aperture/index.png b/assets/images/social/hardware/aperture/index.png new file mode 100644 index 00000000..3a03f2ce Binary files /dev/null and b/assets/images/social/hardware/aperture/index.png differ diff --git a/assets/images/social/hardware/aperture/johnson.png b/assets/images/social/hardware/aperture/johnson.png new file mode 100644 index 00000000..c20fc46b Binary files /dev/null and b/assets/images/social/hardware/aperture/johnson.png differ diff --git a/assets/images/social/hardware/aperture/wheatley.png b/assets/images/social/hardware/aperture/wheatley.png new file mode 100644 index 00000000..fed56490 Binary files /dev/null and b/assets/images/social/hardware/aperture/wheatley.png differ diff --git a/assets/images/social/hardware/azazel.png b/assets/images/social/hardware/azazel.png new file mode 100644 index 00000000..7bd688f4 Binary files /dev/null and b/assets/images/social/hardware/azazel.png differ diff --git a/assets/images/social/hardware/index.png b/assets/images/social/hardware/index.png new file mode 100644 index 00000000..76cb6c60 Binary files /dev/null and b/assets/images/social/hardware/index.png differ diff --git a/assets/images/social/hardware/network/arse.png b/assets/images/social/hardware/network/arse.png new file mode 100644 index 00000000..766f4a12 Binary files /dev/null and b/assets/images/social/hardware/network/arse.png differ diff --git a/assets/images/social/hardware/network/cerberus.png b/assets/images/social/hardware/network/cerberus.png new file mode 100644 index 00000000..33c2ee98 Binary files /dev/null and b/assets/images/social/hardware/network/cerberus.png differ diff --git a/assets/images/social/hardware/network/index.png b/assets/images/social/hardware/network/index.png new file mode 100644 index 00000000..61e95ad0 Binary files /dev/null and b/assets/images/social/hardware/network/index.png differ diff --git a/assets/images/social/hardware/network/mordor.png b/assets/images/social/hardware/network/mordor.png new file mode 100644 index 00000000..4492d09a Binary files /dev/null and b/assets/images/social/hardware/network/mordor.png differ diff --git 
a/assets/images/social/hardware/network/switches.png b/assets/images/social/hardware/network/switches.png new file mode 100644 index 00000000..58bc44f6 Binary files /dev/null and b/assets/images/social/hardware/network/switches.png differ diff --git a/assets/images/social/hardware/nix/hardcase.png b/assets/images/social/hardware/nix/hardcase.png new file mode 100644 index 00000000..204b6358 Binary files /dev/null and b/assets/images/social/hardware/nix/hardcase.png differ diff --git a/assets/images/social/hardware/nix/icarus.png b/assets/images/social/hardware/nix/icarus.png new file mode 100644 index 00000000..1310d7c7 Binary files /dev/null and b/assets/images/social/hardware/nix/icarus.png differ diff --git a/assets/images/social/hardware/nix/motherlode.png b/assets/images/social/hardware/nix/motherlode.png new file mode 100644 index 00000000..2342adba Binary files /dev/null and b/assets/images/social/hardware/nix/motherlode.png differ diff --git a/assets/images/social/hardware/paphos.png b/assets/images/social/hardware/paphos.png new file mode 100644 index 00000000..8aeeea58 Binary files /dev/null and b/assets/images/social/hardware/paphos.png differ diff --git a/assets/images/social/hardware/pygmalion.png b/assets/images/social/hardware/pygmalion.png new file mode 100644 index 00000000..ba230bf7 Binary files /dev/null and b/assets/images/social/hardware/pygmalion.png differ diff --git a/assets/images/social/hardware/zeus.png b/assets/images/social/hardware/zeus.png new file mode 100644 index 00000000..e09f953a Binary files /dev/null and b/assets/images/social/hardware/zeus.png differ diff --git a/assets/images/social/index.png b/assets/images/social/index.png new file mode 100644 index 00000000..62894eac Binary files /dev/null and b/assets/images/social/index.png differ diff --git a/assets/images/social/procedures/ansible.png b/assets/images/social/procedures/ansible.png new file mode 100644 index 00000000..162d6000 Binary files /dev/null and b/assets/images/social/procedures/ansible.png differ diff --git a/assets/images/social/procedures/cheatsheet.png b/assets/images/social/procedures/cheatsheet.png new file mode 100644 index 00000000..a69d47c5 Binary files /dev/null and b/assets/images/social/procedures/cheatsheet.png differ diff --git a/assets/images/social/procedures/handover.png b/assets/images/social/procedures/handover.png new file mode 100644 index 00000000..897b06a2 Binary files /dev/null and b/assets/images/social/procedures/handover.png differ diff --git a/assets/images/social/procedures/index.png b/assets/images/social/procedures/index.png new file mode 100644 index 00000000..61b59ff2 Binary files /dev/null and b/assets/images/social/procedures/index.png differ diff --git a/assets/images/social/procedures/irc-ops.png b/assets/images/social/procedures/irc-ops.png new file mode 100644 index 00000000..ace287b9 Binary files /dev/null and b/assets/images/social/procedures/irc-ops.png differ diff --git a/assets/images/social/procedures/new-admins.png b/assets/images/social/procedures/new-admins.png new file mode 100644 index 00000000..9dcb1640 Binary files /dev/null and b/assets/images/social/procedures/new-admins.png differ diff --git a/assets/images/social/procedures/nixos.png b/assets/images/social/procedures/nixos.png new file mode 100644 index 00000000..4a485a4c Binary files /dev/null and b/assets/images/social/procedures/nixos.png differ diff --git a/assets/images/social/procedures/open-governance-tagging.png b/assets/images/social/procedures/open-governance-tagging.png 
new file mode 100644 index 00000000..6be39b8b Binary files /dev/null and b/assets/images/social/procedures/open-governance-tagging.png differ diff --git a/assets/images/social/procedures/policies.png b/assets/images/social/procedures/policies.png new file mode 100644 index 00000000..993b7348 Binary files /dev/null and b/assets/images/social/procedures/policies.png differ diff --git a/assets/images/social/procedures/post-powercut.png b/assets/images/social/procedures/post-powercut.png new file mode 100644 index 00000000..0ac7b15a Binary files /dev/null and b/assets/images/social/procedures/post-powercut.png differ diff --git a/assets/images/social/procedures/update-wp-domain.png b/assets/images/social/procedures/update-wp-domain.png new file mode 100644 index 00000000..bdfcc785 Binary files /dev/null and b/assets/images/social/procedures/update-wp-domain.png differ diff --git a/assets/images/social/procedures/vpn.png b/assets/images/social/procedures/vpn.png new file mode 100644 index 00000000..5a16a0b7 Binary files /dev/null and b/assets/images/social/procedures/vpn.png differ diff --git a/assets/images/social/services/api.png b/assets/images/social/services/api.png new file mode 100644 index 00000000..f4f0a1b6 Binary files /dev/null and b/assets/images/social/services/api.png differ diff --git a/assets/images/social/services/bastion-vm.png b/assets/images/social/services/bastion-vm.png new file mode 100644 index 00000000..efb2d65c Binary files /dev/null and b/assets/images/social/services/bastion-vm.png differ diff --git a/assets/images/social/services/bind.png b/assets/images/social/services/bind.png new file mode 100644 index 00000000..b4b4c620 Binary files /dev/null and b/assets/images/social/services/bind.png differ diff --git a/assets/images/social/services/consul.png b/assets/images/social/services/consul.png new file mode 100644 index 00000000..8d977bf1 Binary files /dev/null and b/assets/images/social/services/consul.png differ diff --git a/assets/images/social/services/exposed.png b/assets/images/social/services/exposed.png new file mode 100644 index 00000000..784eae24 Binary files /dev/null and b/assets/images/social/services/exposed.png differ diff --git a/assets/images/social/services/gitea.png b/assets/images/social/services/gitea.png new file mode 100644 index 00000000..0bc722fe Binary files /dev/null and b/assets/images/social/services/gitea.png differ diff --git a/assets/images/social/services/icecast.png b/assets/images/social/services/icecast.png new file mode 100644 index 00000000..e1c579f2 Binary files /dev/null and b/assets/images/social/services/icecast.png differ diff --git a/assets/images/social/services/index.png b/assets/images/social/services/index.png new file mode 100644 index 00000000..cfff69ea Binary files /dev/null and b/assets/images/social/services/index.png differ diff --git a/assets/images/social/services/irc.png b/assets/images/social/services/irc.png new file mode 100644 index 00000000..d779fdf5 Binary files /dev/null and b/assets/images/social/services/irc.png differ diff --git a/assets/images/social/services/ldap.png b/assets/images/social/services/ldap.png new file mode 100644 index 00000000..957df892 Binary files /dev/null and b/assets/images/social/services/ldap.png differ diff --git a/assets/images/social/services/md.png b/assets/images/social/services/md.png new file mode 100644 index 00000000..c2c1a4e4 Binary files /dev/null and b/assets/images/social/services/md.png differ diff --git a/assets/images/social/services/nfs.png 
b/assets/images/social/services/nfs.png new file mode 100644 index 00000000..24da875e Binary files /dev/null and b/assets/images/social/services/nfs.png differ diff --git a/assets/images/social/services/nomad.png b/assets/images/social/services/nomad.png new file mode 100644 index 00000000..370c4a66 Binary files /dev/null and b/assets/images/social/services/nomad.png differ diff --git a/assets/images/social/services/paste.png b/assets/images/social/services/paste.png new file mode 100644 index 00000000..fd142313 Binary files /dev/null and b/assets/images/social/services/paste.png differ diff --git a/assets/images/social/services/servers.png b/assets/images/social/services/servers.png new file mode 100644 index 00000000..ced81fa8 Binary files /dev/null and b/assets/images/social/services/servers.png differ diff --git a/assets/images/social/services/socs.png b/assets/images/social/services/socs.png new file mode 100644 index 00000000..36cb1c85 Binary files /dev/null and b/assets/images/social/services/socs.png differ diff --git a/assets/images/social/services/traefik.png b/assets/images/social/services/traefik.png new file mode 100644 index 00000000..f8beb93e Binary files /dev/null and b/assets/images/social/services/traefik.png differ diff --git a/assets/images/social/services/user-vms.png b/assets/images/social/services/user-vms.png new file mode 100644 index 00000000..51de3d39 Binary files /dev/null and b/assets/images/social/services/user-vms.png differ diff --git a/assets/images/social/services/wetty.png b/assets/images/social/services/wetty.png new file mode 100644 index 00000000..a9328a6c Binary files /dev/null and b/assets/images/social/services/wetty.png differ diff --git a/assets/images/social/services/znapzend.png b/assets/images/social/services/znapzend.png new file mode 100644 index 00000000..75f282cc Binary files /dev/null and b/assets/images/social/services/znapzend.png differ diff --git a/assets/images/social/tags.png b/assets/images/social/tags.png new file mode 100644 index 00000000..6ce1ed3d Binary files /dev/null and b/assets/images/social/tags.png differ diff --git a/assets/javascripts/bundle.525ec568.min.js b/assets/javascripts/bundle.525ec568.min.js new file mode 100644 index 00000000..4b08eae4 --- /dev/null +++ b/assets/javascripts/bundle.525ec568.min.js @@ -0,0 +1,16 @@ +"use strict";(()=>{var Wi=Object.create;var gr=Object.defineProperty;var Di=Object.getOwnPropertyDescriptor;var Vi=Object.getOwnPropertyNames,Vt=Object.getOwnPropertySymbols,Ni=Object.getPrototypeOf,yr=Object.prototype.hasOwnProperty,ao=Object.prototype.propertyIsEnumerable;var io=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,$=(e,t)=>{for(var r in t||(t={}))yr.call(t,r)&&io(e,r,t[r]);if(Vt)for(var r of Vt(t))ao.call(t,r)&&io(e,r,t[r]);return e};var so=(e,t)=>{var r={};for(var o in e)yr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Vt)for(var o of Vt(e))t.indexOf(o)<0&&ao.call(e,o)&&(r[o]=e[o]);return r};var xr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var zi=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Vi(t))!yr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=Di(t,n))||o.enumerable});return e};var Mt=(e,t,r)=>(r=e!=null?Wi(Ni(e)):{},zi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var co=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var 
lo=xr((Er,po)=>{(function(e,t){typeof Er=="object"&&typeof po!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(k){return!!(k&&k!==document&&k.nodeName!=="HTML"&&k.nodeName!=="BODY"&&"classList"in k&&"contains"in k.classList)}function p(k){var ft=k.type,qe=k.tagName;return!!(qe==="INPUT"&&a[ft]&&!k.readOnly||qe==="TEXTAREA"&&!k.readOnly||k.isContentEditable)}function c(k){k.classList.contains("focus-visible")||(k.classList.add("focus-visible"),k.setAttribute("data-focus-visible-added",""))}function l(k){k.hasAttribute("data-focus-visible-added")&&(k.classList.remove("focus-visible"),k.removeAttribute("data-focus-visible-added"))}function f(k){k.metaKey||k.altKey||k.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(k){o=!1}function d(k){s(k.target)&&(o||p(k.target))&&c(k.target)}function y(k){s(k.target)&&(k.target.classList.contains("focus-visible")||k.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(k.target))}function L(k){document.visibilityState==="hidden"&&(n&&(o=!0),X())}function X(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function te(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(k){k.target.nodeName&&k.target.nodeName.toLowerCase()==="html"||(o=!1,te())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",L,!0),X(),r.addEventListener("focus",d,!0),r.addEventListener("blur",y,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var qr=xr((hy,On)=>{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var $a=/["'&<>]/;On.exports=Pa;function Pa(e){var t=""+e,r=$a.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof It=="object"&&typeof Yr=="object"?Yr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof It=="object"?It.ClipboardJS=r():t.ClipboardJS=r()})(It,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ui}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(A){return!1}}var d=function(A){var M=f()(A);return u("cut"),M},y=d;function L(V){var A=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[A?"right":"left"]="-9999px";var F=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(F,"px"),M.setAttribute("readonly",""),M.value=V,M}var X=function(A,M){var F=L(A);M.container.appendChild(F);var D=f()(F);return u("copy"),F.remove(),D},te=function(A){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},F="";return typeof A=="string"?F=X(A,M):A instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(A==null?void 0:A.type)?F=X(A.value,M):(F=f()(A),u("copy")),F},J=te;function k(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?k=function(M){return typeof M}:k=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},k(V)}var ft=function(){var A=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=A.action,F=M===void 0?"copy":M,D=A.container,Y=A.target,$e=A.text;if(F!=="copy"&&F!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&k(Y)==="object"&&Y.nodeType===1){if(F==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(F==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if($e)return J($e,{container:D});if(Y)return F==="cut"?y(Y):J(Y,{container:D})},qe=ft;function Fe(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Fe=function(M){return typeof M}:Fe=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},Fe(V)}function ki(V,A){if(!(V instanceof A))throw new TypeError("Cannot call a class as a function")}function no(V,A){for(var M=0;M0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Fe(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function($e){return Y.onClick($e)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,$e=this.action(Y)||"copy",Dt=qe({action:$e,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Dt?"success":"error",{action:$e,text:Dt,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return y(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,$e=!!document.queryCommandSupported;return Y.forEach(function(Dt){$e=$e&&!!document.queryCommandSupported(Dt)}),$e}}]),M}(s()),Ui=Fi},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,d,y){var L=c.apply(this,arguments);return l.addEventListener(u,L,y),{destroy:function(){l.removeEventListener(u,L,y)}}}function p(l,f,u,d,y){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(L){return s(L,f,u,d,y)}))}function c(l,f,u,d){return function(y){y.delegateTarget=a(y.target,f),y.delegateTarget&&d.call(l,y)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,d,y){if(!u&&!d&&!y)throw new Error("Missing required 
arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(y))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,d,y);if(a.nodeList(u))return l(u,d,y);if(a.string(u))return f(u,d,y);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,d,y){return u.addEventListener(d,y),{destroy:function(){u.removeEventListener(d,y)}}}function l(u,d,y){return Array.prototype.forEach.call(u,function(L){L.addEventListener(d,y)}),{destroy:function(){Array.prototype.forEach.call(u,function(L){L.removeEventListener(d,y)})}}}function f(u,d,y){return s(document.body,u,d,y)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||p(d,L)})},y&&(n[d]=y(n[d])))}function p(d,y){try{c(o[d](y))}catch(L){u(i[0][3],L)}}function c(d){d.value instanceof nt?Promise.resolve(d.value.v).then(l,f):u(i[0][2],d)}function l(d){p("next",d)}function f(d){p("throw",d)}function u(d,y){d(y),i.shift(),i.length&&p(i[0][0],i[0][1])}}function uo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof he=="function"?he(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function H(e){return typeof e=="function"}function ut(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ut(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function Qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ue=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var 
t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=he(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(L){t={error:L}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(H(l))try{l()}catch(L){i=L instanceof zt?L.errors:[L]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=he(f),d=u.next();!d.done;d=u.next()){var y=d.value;try{ho(y)}catch(L){i=i!=null?i:[],L instanceof zt?i=q(q([],N(i)),N(L.errors)):i.push(L)}}}catch(L){o={error:L}}finally{try{d&&!d.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)ho(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Ue.EMPTY;function qt(e){return e instanceof Ue||e&&"closed"in e&&H(e.remove)&&H(e.add)&&H(e.unsubscribe)}function ho(e){H(e)?e():e.unsubscribe()}var Pe={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var dt={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Ue(function(){o.currentObservers=null,Qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new j;return r.source=this,r},t.create=function(r,o){return new To(r,o)},t}(j);var To=function(e){oe(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){oe(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var 
r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var At={now:function(){return(At.delegate||Date).now()},delegate:void 0};var Ct=function(e){oe(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=At);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(gt);var Lo=function(e){oe(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(yt);var kr=new Lo(Oo);var Mo=function(e){oe(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=vt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(vt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(gt);var _o=function(e){oe(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(yt);var me=new _o(Mo);var S=new j(function(e){return e.complete()});function Yt(e){return e&&H(e.schedule)}function Hr(e){return e[e.length-1]}function Xe(e){return H(Hr(e))?e.pop():void 0}function ke(e){return Yt(Hr(e))?e.pop():void 0}function Bt(e,t){return typeof Hr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return H(e==null?void 0:e.then)}function Jt(e){return H(e[bt])}function Xt(e){return Symbol.asyncIterator&&H(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Zi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Zi();function tr(e){return H(e==null?void 0:e[er])}function rr(e){return fo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return H(e==null?void 0:e.getReader)}function U(e){if(e instanceof j)return e;if(e!=null){if(Jt(e))return ea(e);if(xt(e))return ta(e);if(Gt(e))return ra(e);if(Xt(e))return Ao(e);if(tr(e))return oa(e);if(or(e))return na(e)}throw Zt(e)}function ea(e){return new j(function(t){var r=e[bt]();if(H(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function ta(e){return new j(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?De(t):Qo(function(){return new ir}))}}function jr(e){return e<=0?function(){return S}:E(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,d=0,y=!1,L=!1,X=function(){f==null||f.unsubscribe(),f=void 0},te=function(){X(),l=u=void 0,y=L=!1},J=function(){var k=l;te(),k==null||k.unsubscribe()};return E(function(k,ft){d++,!L&&!y&&X();var qe=u=u!=null?u:r();ft.add(function(){d--,d===0&&!L&&!y&&(f=Ur(J,p))}),qe.subscribe(ft),!l&&d>0&&(l=new at({next:function(Fe){return qe.next(Fe)},error:function(Fe){L=!0,X(),f=Ur(te,n,Fe),qe.error(Fe)},complete:function(){y=!0,X(),f=Ur(te,a),qe.complete()}}),U(k).subscribe(l))})(c)}}function Ur(e,t){for(var r=[],o=2;oe.next(document)),e}function P(e,t=document){return Array.from(t.querySelectorAll(e))}function R(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Ie(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var wa=O(h(document.body,"focusin"),h(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Ie()||document.body),G(1));function et(e){return wa.pipe(m(t=>e.contains(t)),K())}function $t(e,t){return C(()=>O(h(e,"mouseenter").pipe(m(()=>!0)),h(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Le(+!r*t)):le,Q(e.matches(":hover"))))}function Jo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Jo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Jo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function Tt(e){let t=x("script",{src:e});return 
C(()=>(document.head.appendChild(t),O(h(t,"load"),h(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),_(()=>document.head.removeChild(t)),Te(1))))}var Xo=new g,Ta=C(()=>typeof ResizeObserver=="undefined"?Tt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Xo.next(t)))),v(e=>O(Ye,I(e)).pipe(_(()=>e.disconnect()))),G(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return Ta.pipe(w(r=>r.observe(t)),v(r=>Xo.pipe(b(o=>o.target===t),_(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function St(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Zo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ve(e){return{x:e.offsetLeft,y:e.offsetTop}}function en(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function tn(e){return O(h(window,"load"),h(window,"resize")).pipe(Me(0,me),m(()=>Ve(e)),Q(Ve(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function Ne(e){return O(h(e,"scroll"),h(window,"scroll"),h(window,"resize")).pipe(Me(0,me),m(()=>pr(e)),Q(pr(e)))}var rn=new g,Sa=C(()=>I(new IntersectionObserver(e=>{for(let t of e)rn.next(t)},{threshold:0}))).pipe(v(e=>O(Ye,I(e)).pipe(_(()=>e.disconnect()))),G(1));function tt(e){return Sa.pipe(w(t=>t.observe(e)),v(t=>rn.pipe(b(({target:r})=>r===e),_(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function on(e,t=16){return Ne(e).pipe(m(({y:r})=>{let o=ce(e),n=St(e);return r>=n.height-o.height-t}),K())}var lr={drawer:R("[data-md-toggle=drawer]"),search:R("[data-md-toggle=search]")};function nn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function ze(e){let t=lr[e];return h(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function Oa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function La(){return O(h(window,"compositionstart").pipe(m(()=>!0)),h(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function an(){let e=h(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:nn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Ie();if(typeof o!="undefined")return!Oa(o,r)}return!0}),pe());return La().pipe(v(t=>t?S:e))}function ye(){return new URL(location.href)}function lt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function sn(){return new g}function cn(){return location.hash.slice(1)}function pn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Ma(e){return O(h(window,"hashchange"),e).pipe(m(cn),Q(cn()),b(t=>t.length>0),G(1))}function ln(e){return Ma(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function Pt(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function mn(){let e=matchMedia("print");return 
O(h(window,"beforeprint").pipe(m(()=>!0)),h(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():S))}function zr(e,t){return new j(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function je(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function fn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function un(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function dn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function hn(){return O(h(window,"scroll",{passive:!0}),h(window,"resize",{passive:!0})).pipe(m(dn),Q(dn()))}function bn(){return{width:innerWidth,height:innerHeight}}function vn(){return h(window,"resize",{passive:!0}).pipe(m(bn),Q(bn()))}function gn(){return z([hn(),vn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(ee("size")),n=z([o,r]).pipe(m(()=>Ve(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function _a(e){return h(e,"message",t=>t.data)}function Aa(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function yn(e,t=new Worker(e)){let r=_a(t),o=Aa(t),n=new g;n.subscribe(o);let i=o.pipe(Z(),ie(!0));return n.pipe(Z(),Re(r.pipe(W(i))),pe())}var Ca=R("#__config"),Ot=JSON.parse(Ca.textContent);Ot.base=`${new URL(Ot.base,ye())}`;function xe(){return Ot}function B(e){return Ot.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?Ot.translations[e].replace("#",t.toString()):Ot.translations[e]}function Se(e,t=document){return R(`[data-md-component=${e}]`,t)}function ae(e,t=document){return P(`[data-md-component=${e}]`,t)}function ka(e){let t=R(".md-typeset > :first-child",e);return h(t,"click",{once:!0}).pipe(m(()=>R(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function xn(e){if(!B("announce.dismiss")||!e.childElementCount)return S;if(!e.hidden){let t=R(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),ka(e).pipe(w(r=>t.next(r)),_(()=>t.complete()),m(r=>$({ref:e},r)))})}function Ha(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function En(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Ha(e,t).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))}function Rt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function wn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function Tn(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function Sn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}var Ln=Mt(qr());function Qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,(0,Ln.default)(c))," "],[]).slice(0,-1),i=xe(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=xe();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&x("nav",{class:"md-tags"},e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)})),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Mn(e){let t=e[0].score,r=[...e],o=xe(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreQr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>Qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function _n(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Kr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function An(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ra(e){var o;let t=xe(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Cn(e,t){var o;let r=xe();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ra)))}var Ia=0;function ja(e){let t=z([et(e),$t(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Zo(e)).pipe(ne(Ne),pt(1),He(t),m(()=>en(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function Fa(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ia++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(Z(),ie(!1)).subscribe(a);let s=a.pipe(Ht(c=>Le(+!c*250,kr)),K(),v(c=>c?r:S),w(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>$t(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),re(s,o),m(([c,l,{size:f}])=>{let 
u=e.getBoundingClientRect(),d=u.width/2;if(l.role==="tooltip")return{x:d,y:8+u.height};if(u.y>=f.height/2){let{height:y}=ce(l);return{x:d,y:-16-y}}else return{x:d,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),re(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(R(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),ve(me),re(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ja(e).pipe(w(c=>i.next(c)),_(()=>i.complete()),m(c=>$({ref:e},c)))})}function mt(e,{viewport$:t},r=document.body){return Fa(e,{content$:new j(o=>{let n=e.title,i=wn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Ua(e,t){let r=C(()=>z([tn(e),Ne(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function kn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(W(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),O(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Me(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),h(n,"click").pipe(W(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),h(n,"mousedown").pipe(W(a),re(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Ie())==null||c.blur()}}),r.pipe(W(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Ua(e,t).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function Wa(e){return e.tagName==="CODE"?P(".c, .c1, .cm",e):[e]}function Da(e){let t=[];for(let r of Wa(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function 
Hn(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Da(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,Tn(p,i)),s.replaceWith(a.get(p)))}return a.size===0?S:C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=[];for(let[l,f]of a)c.push([R(".md-typeset",f),R(`:scope > li:nth-child(${l})`,e)]);return o.pipe(W(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?Hn(f,u):Hn(u,f)}),O(...[...a].map(([,l])=>kn(l,t,{target$:r}))).pipe(_(()=>s.complete()),pe())})}function $n(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return $n(t)}}function Pn(e,t){return C(()=>{let r=$n(e);return typeof r!="undefined"?fr(r,e,t):S})}var Rn=Mt(Br());var Va=0;function In(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return In(t)}}function Na(e){return ge(e).pipe(m(({width:t})=>({scrollable:St(e).width>t})),ee("scrollable"))}function jn(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(jr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Rn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Va++}`;let l=Sn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(mt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=In(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(W(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:S)))}}return P(":scope > span[id]",e).length&&e.classList.add("md-code__content"),Na(e).pipe(w(c=>n.next(c)),_(()=>n.complete()),m(c=>$({ref:e},c)),Re(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function za(e,{target$:t,print$:r}){let o=!0;return O(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),w(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Fn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),za(e,t).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}var Un=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel p,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel p{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g 
#flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Gr,Qa=0;function Ka(){return typeof mermaid=="undefined"||mermaid instanceof Element?Tt("https://unpkg.com/mermaid@11/dist/mermaid.min.js"):I(void 0)}function Wn(e){return e.classList.remove("mermaid"),Gr||(Gr=Ka().pipe(w(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Un,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Gr.subscribe(()=>co(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Qa++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Gr.pipe(m(()=>({ref:e})))}var Dn=x("table");function Vn(e){return e.replaceWith(Dn),Dn.replaceWith(An(e)),I({ref:e})}function Ya(e){let t=e.find(r=>r.checked)||e[0];return O(...e.map(r=>h(r,"change").pipe(m(()=>R(`label[for="${r.id}"]`))))).pipe(Q(R(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Nn(e,{viewport$:t,target$:r}){let o=R(".tabbed-labels",e),n=P(":scope > input",e),i=Kr("prev");e.append(i);let a=Kr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(Z(),ie(!0));z([s,ge(e),tt(e)]).pipe(W(p),Me(1,me)).subscribe({next([{active:c},l]){let f=Ve(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let d=pr(o);(f.xd.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([Ne(o),ge(o)]).pipe(W(p)).subscribe(([c,l])=>{let f=St(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),O(h(i,"click").pipe(m(()=>-1)),h(a,"click").pipe(m(()=>1))).pipe(W(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(W(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=R(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),h(l.firstElementChild,"click").pipe(W(p),b(f=>!(f.metaKey||f.ctrlKey)),w(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),re(t)).subscribe(([{active:c},{offset:l}])=>{let 
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let y of P("[data-tabs]"))for(let L of P(":scope > input",y)){let X=R(`label[for="${L.id}"]`);if(X!==c&&X.innerText.trim()===f){X.setAttribute("data-md-switching",""),L.click();break}}window.scrollTo({top:e.offsetTop-u});let d=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...d])])}}),s.pipe(W(p)).subscribe(()=>{for(let c of P("audio, video",e))c.pause()}),Ya(n).pipe(w(c=>s.next(c)),_(()=>s.complete()),m(c=>$({ref:e},c)))}).pipe(Ke(se))}function zn(e,{viewport$:t,target$:r,print$:o}){return O(...P(".annotate:not(.highlight)",e).map(n=>Pn(n,{target$:r,print$:o})),...P("pre:not(.mermaid) > code",e).map(n=>jn(n,{target$:r,print$:o})),...P("pre.mermaid",e).map(n=>Wn(n)),...P("table:not([class])",e).map(n=>Vn(n)),...P("details",e).map(n=>Fn(n,{target$:r,print$:o})),...P("[data-tabs]",e).map(n=>Nn(n,{viewport$:t,target$:r})),...P("[title]",e).filter(()=>B("content.tooltips")).map(n=>mt(n,{viewport$:t})))}function Ba(e,{alert$:t}){return t.pipe(v(r=>O(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function qn(e,t){let r=R(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ba(e,t).pipe(w(n=>o.next(n)),_(()=>o.complete()),m(n=>$({ref:e},n)))})}var Ga=0;function Ja(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?Ne(o):I({x:0,y:0}),i=O(et(t),$t(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ve(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Qn(e){let t=e.title;if(!t.length)return S;let r=`__tooltip_${Ga++}`,o=Rt(r,"inline"),n=R(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),O(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Me(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ja(o,e).pipe(w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))}).pipe(Ke(se))}function Xa({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Be(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=ze("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Kn(e,t){return C(()=>z([ge(e),Xa(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function Yn(e,{header$:t,main$:r}){return C(()=>{let o=new 
g,n=o.pipe(Z(),ie(!0));o.pipe(ee("active"),He(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue(P("[title]",e)).pipe(b(()=>B("content.tooltips")),ne(a=>Qn(a)));return r.subscribe(o),t.pipe(W(n),m(a=>$({ref:e},a)),Re(i.pipe(W(n))))})}function Za(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),ee("active"))}function Bn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?S:Za(o,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))})}function Gn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),ee("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function es(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(ne(o=>h(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Jn(e){let t=P("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Pt("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;sa.key==="Enter"),re(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(ve(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),es(t).pipe(W(n.pipe(Ce(1))),ct(),w(a=>i.next(a)),_(()=>i.complete()),m(a=>$({ref:e},a)))})}function Xn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(w(o=>r.next({value:o})),_(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Jr=Mt(Br());function ts(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Zn({alert$:e}){Jr.default.isSupported()&&new j(t=>{new Jr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||ts(R(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(w(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function ei(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function rs(e,t){let r=new Map;for(let o of P("url",e)){let n=R("loc",o),i=[ei(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of P("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(ei(new URL(s),t))}}return r}function ur(e){return un(new URL("sitemap.xml",e)).pipe(m(t=>rs(t,new URL(e))),de(()=>I(new Map)))}function os(e,t){if(!(e.target instanceof Element))return S;let r=e.target.closest("a");if(r===null)return S;if(r.target||e.metaKey||e.ctrlKey)return S;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):S}function ti(e){let t=new Map;for(let r of P(":scope > *",e.head))t.set(r.outerHTML,r);return t}function ri(e){for(let t of P("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function ns(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=ti(document);for(let[o,n]of ti(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return We(P("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new j(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),S}),Z(),ie(document))}function oi({location$:e,viewport$:t,progress$:r}){let o=xe();if(location.protocol==="file:")return S;let n=ur(o.base);I(document).subscribe(ri);let i=h(document.body,"click").pipe(He(n),v(([p,c])=>os(p,c)),pe()),a=h(window,"popstate").pipe(m(ye),pe());i.pipe(re(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),O(i,a).subscribe(e);let s=e.pipe(ee("pathname"),v(p=>fn(p,{progress$:r}).pipe(de(()=>(lt(p,!0),S)))),v(ri),v(ns),pe());return O(s.pipe(re(e,(p,c)=>c)),s.pipe(v(()=>e),ee("pathname"),v(()=>e),ee("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),w(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",pn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(ee("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ni=Mt(qr());function ii(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ni.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function jt(e){return e.type===1}function dr(e){return 
e.type===3}function ai(e,t){let r=yn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function si(e){var l;let{selectedVersionSitemap:t,selectedVersionBaseURL:r,currentLocation:o,currentBaseURL:n}=e,i=(l=Xr(n))==null?void 0:l.pathname;if(i===void 0)return;let a=ss(o.pathname,i);if(a===void 0)return;let s=ps(t.keys());if(!t.has(s))return;let p=Xr(a,s);if(!p||!t.has(p.href))return;let c=Xr(a,r);if(c)return c.hash=o.hash,c.search=o.search,c}function Xr(e,t){try{return new URL(e,t)}catch(r){return}}function ss(e,t){if(e.startsWith(t))return e.slice(t.length)}function cs(e,t){let r=Math.min(e.length,t.length),o;for(o=0;oS)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>h(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),re(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(new URL(p)))}}return S}),v(i=>ur(i).pipe(m(a=>{var s;return(s=si({selectedVersionSitemap:a,selectedVersionBaseURL:i,currentLocation:ye(),currentBaseURL:t.base}))!=null?s:i})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(Cn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ls(e,{worker$:t}){let{searchParams:r}=ye();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=ye();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(jt)),h(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function pi(e,{worker$:t}){let r=new g,o=r.pipe(Z(),ie(!0));z([t.pipe(Ae(jt)),r],(i,a)=>a).pipe(ee("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(ee("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),h(e.form,"reset").pipe(W(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),ls(e,{worker$:t}).pipe(w(i=>r.next(i)),_(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function li(e,{worker$:t,query$:r}){let o=new g,n=on(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(re(r),Wr(t.pipe(Ae(jt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(w(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(ne(l=>{let f=fe("details",l);return typeof 
f=="undefined"?S:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),_(()=>o.complete()),m(l=>$({ref:e},l)))}function ms(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ye();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function mi(e,t){let r=new g,o=r.pipe(Z(),ie(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),ms(e,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))}function fi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(He(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(w(s=>o.next(s)),_(()=>o.complete()),m(()=>({ref:e})))}function ui(e,{index$:t,keyboard$:r}){let o=xe();try{let n=ai(o.search,t),i=Se("search-query",e),a=Se("search-result",e);h(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=pi(i,{worker$:n});return O(s,li(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>mi(p,{query$:s})),...ae("search-suggest",e).map(p=>fi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function di(e,{index$:t,location$:r}){return z([t,r.pipe(Q(ye()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ii(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function fs(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Zr(e,o){var n=o,{header$:t}=n,r=so(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=s.pipe(Me(0,me));return 
c.pipe(re(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2})}}}),ue(P("label[tabindex]",e)).pipe(ne(l=>h(l,"click").pipe(ve(se),m(()=>l),W(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),fs(e,r).pipe(w(l=>s.next(l)),_(()=>s.complete()),m(l=>$({ref:e},l)))})}function hi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(je(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),je(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return je(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function bi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(je(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),je(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function vi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return hi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return bi(r,o)}return S}var us;function ds(e){return us||(us=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return vi(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function gi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(_n(o)),t.classList.add("md-source__repository--active")}),ds(e).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function hs(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ee("hidden"))}function yi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):hs(e,t)).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function bs(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(ee("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(ee("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),He(i),v(([p,c])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(s.height);for(;f.length;){let[,L]=f[0];if(L-c=u&&!y)f=[l.pop(),...f];else 
break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(Z(),ie(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),He(o.pipe(ve(se))),re(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(W(a),ee("offset"),_e(250),Ce(1),W(n.pipe(Ce(1))),ct({delay:250}),re(i)).subscribe(([,{prev:s}])=>{let p=ye(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),bs(e,{viewport$:t,header$:r}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function vs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),W(o.pipe(Ce(1))),ie(!0),ct({delay:250}),m(a=>({hidden:a})))}function Ei(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(a),ee("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),h(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),vs(e,{viewport$:t,main$:o,target$:n}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))}function wi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),ne(r=>tt(r).pipe(W(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(W(e.pipe(Ce(1))),_(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),ne(r=>mt(r,{viewport$:t}))).subscribe()}function Ti({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),ne(r=>h(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),re(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function gs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Si({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),b(gs),ne(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Oi({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),re(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of 
Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ys(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",eo.base)}`).pipe(m(()=>__index),G(1)):je(new URL("search/search_index.json",eo.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Go(),Ut=sn(),Lt=ln(Ut),to=an(),Oe=gn(),hr=Pt("(min-width: 960px)"),Mi=Pt("(min-width: 1220px)"),_i=mn(),eo=xe(),Ai=document.forms.namedItem("search")?ys():Ye,ro=new g;Zn({alert$:ro});var oo=new g;B("navigation.instant")&&oi({location$:Ut,viewport$:Oe,progress$:oo}).subscribe(ot);var Li;((Li=eo.version)==null?void 0:Li.provider)==="mike"&&ci({document$:ot});O(Ut,Lt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});to.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});wi({viewport$:Oe,document$:ot});Ti({document$:ot,tablet$:hr});Si({document$:ot});Oi({viewport$:Oe,tablet$:hr});var rt=Kn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Gn(e,{viewport$:Oe,header$:rt})),G(1)),xs=O(...ae("consent").map(e=>En(e,{target$:Lt})),...ae("dialog").map(e=>qn(e,{alert$:ro})),...ae("header").map(e=>Yn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Jn(e)),...ae("progress").map(e=>Xn(e,{progress$:oo})),...ae("search").map(e=>ui(e,{index$:Ai,keyboard$:to})),...ae("source").map(e=>gi(e))),Es=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>zn(e,{viewport$:Oe,target$:Lt,print$:_i})),...ae("content").map(e=>B("search.highlight")?di(e,{index$:Ai,location$:Ut}):S),...ae("header-title").map(e=>Bn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Mi,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>yi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})),...ae("top").map(e=>Ei(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})))),Ci=ot.pipe(v(()=>Es),Re(xs),G(1));Ci.subscribe();window.document$=ot;window.location$=Ut;window.target$=Lt;window.keyboard$=to;window.viewport$=Oe;window.tablet$=hr;window.screen$=Mi;window.print$=_i;window.alert$=ro;window.progress$=oo;window.component$=Ci;})(); +//# sourceMappingURL=bundle.525ec568.min.js.map + diff --git a/assets/javascripts/bundle.525ec568.min.js.map b/assets/javascripts/bundle.525ec568.min.js.map new file mode 100644 index 00000000..ef5d8d34 --- /dev/null +++ b/assets/javascripts/bundle.525ec568.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", 
"node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", 
"node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", 
"node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", 
"src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? 
x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? \"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? 
{ value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? 
receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
+ + + + + + + + + + + + + + + +
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/css/timeago.css b/css/timeago.css new file mode 100644 index 00000000..f7ab7d69 --- /dev/null +++ b/css/timeago.css @@ -0,0 +1,15 @@ +/* + timeago output is dynamic, which breaks when you print a page. + + This CSS is only included when type: timeago + and ensures fallback to type "iso_date" when printing. + + */ + +.git-revision-date-localized-plugin-iso_date { display: none } + +@media print { + .git-revision-date-localized-plugin-iso_date { display: inline } + .git-revision-date-localized-plugin-timeago { display: none } +} + diff --git a/hardware/aperture/about/index.html b/hardware/aperture/about/index.html new file mode 100644 index 00000000..d6999054 --- /dev/null +++ b/hardware/aperture/about/index.html @@ -0,0 +1,2369 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + About Aperture - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

About Aperture

+

Aperture is Redbrick's fleet of hardware that was installed in May 2022 by distro, pints, skins, cawnj, ymacomp and arkues.

+

It consists of:

+ + + + + + + + + + + + + + + + +
CPU | RAM | Storage
AMD 7302P 3GHz, 16C/32T, 128M, 155W, 3200 | 2x 16GB RDIMM, 3200MT/s Dual Rank | 4x 2TB SATA HDDs (hardware RAID)
+
    +
  • 2x Ubiquiti USW Pro - rivendell, isengard
  • +
  • 1x Ubiquiti UDM Pro - mordor
  • +
+

Servers

+

The three servers are named glados, wheatley and chell.

+

Networks

+

The firewall is called mordor, and the two 24-port switches are called rivendell and isengard.

+

Networking

+

The IP address range for the aperture subnet is 10.10.0.0/24, with 10.10.0.0/16 being used for user VMs.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Hostname | Internal Address | External Address | Purpose
mordor | 10.10.0.1 | N/A | Firewall
rivendell | 10.10.0.2 | N/A | Switch
isengard | 10.10.0.3 | N/A | Switch
glados | 10.10.0.4 | 136.206.16.4 | Server
wheatley | 10.10.0.5 | 136.206.16.5 | Server
chell | 10.10.0.6 | 136.206.16.6 | Server
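The internal and external server addresses share their last octet, and the user-VM /16 wholly contains the infrastructure /24. Below is a minimal sketch using Python's `ipaddress` module to illustrate both points — the addresses are the ones from the table above, and the script is purely illustrative, not an official tool:

```python
import ipaddress

# Aperture infrastructure subnet and the wider range used for user VMs.
infra = ipaddress.ip_network("10.10.0.0/24")
user_vms = ipaddress.ip_network("10.10.0.0/16")
print(infra.subnet_of(user_vms))  # True: the infrastructure /24 nests inside the /16

# Internal/external address pairs for the three servers, per the table above.
servers = {
    "glados": ("10.10.0.4", "136.206.16.4"),
    "wheatley": ("10.10.0.5", "136.206.16.5"),
    "chell": ("10.10.0.6", "136.206.16.6"),
}

for name, (internal, external) in servers.items():
    assert ipaddress.ip_address(internal) in infra
    # The last octet is shared between the internal and external address.
    assert internal.rsplit(".", 1)[1] == external.rsplit(".", 1)[1]
    print(f"{name}: {internal} <-> {external}")
```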
+
+

Note!

+

Blue cables are used for the production network.

+
+

KVM

+

nexus is the name of the KVM switch. Its internal IP address is 10.10.0.10.

+

glados is connected on port 1, wheatley on port 2, and chell on port 3.

+
+

Note!

+

Yellow cables are used for the KVM network.

+
+

iDRAC

+

The new servers are all equipped with iDRACs. These still need to be configured.

+
+

Note!

+

Red cables are used for the iDRAC network.

+
+

Images (click me)

+

Switching from the Old Network to the New

+

We have two address ranges that come in on a single redundant link. We're exchanging that redundant link for two separate links, each taking responsibility for one address range (136.206.15.0/24 and 136.206.16.0/24). This surrenders redundancy, but only to keep uptime and connectivity during the switchover. Once the new servers are production-ready, we can recombine the links to regain the redundancy.

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/aperture/chell/index.html b/hardware/aperture/chell/index.html new file mode 100644 index 00000000..0d661166 --- /dev/null +++ b/hardware/aperture/chell/index.html @@ -0,0 +1,2260 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Chell - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Chell

+

Details

+
    +
  • Type: Dell R6515
  • +
  • OS: Debian 11
  • +
  • CPU: AMD 7302P 3GHz, 16C/32T, 128M, 155W
  • +
  • RAM: 2x 16GB RDIMM, 3200MT/s Dual Rank
  • +
  • Storage: 4x 2TB SATA HDDs (hardware RAID)
  • +
+

Part of aperture

+

Where to Find

+
    +
  • Internal:
      +
    • 10.10.0.6
    • +
    +
  • +
  • External:
      +
    • 136.206.16.6
    • +
    • chell.redbrick.dcu.ie
    • +
    • chell.aperture.redbrick.dcu.ie
    • +
    +
  • +
+

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/aperture/glados/index.html b/hardware/aperture/glados/index.html new file mode 100644 index 00000000..eb6f1ef0 --- /dev/null +++ b/hardware/aperture/glados/index.html @@ -0,0 +1,2260 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + GlaDOS - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

GlaDOS

+

Details

+
    +
  • Type: Dell R6515
  • +
  • OS: Debian 11
  • +
  • CPU: AMD 7302P 3GHz, 16C/32T, 128M, 155W
  • +
  • RAM: 2x 16GB RDIMM, 3200MT/s Dual Rank
  • +
  • Storage: 4x 2TB SATA HDDs (hardware RAID)
  • +
+

Part of aperture

+

Where to Find

+
    +
  • Internal:
      +
    • 10.10.0.4
    • +
    +
  • +
  • External:
      +
    • 136.206.16.4
    • +
    • glados.redbrick.dcu.ie
    • +
    • glados.aperture.redbrick.dcu.ie
    • +
    +
  • +
+

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/aperture/images/index.html b/hardware/aperture/images/index.html new file mode 100644 index 00000000..4af485e6 --- /dev/null +++ b/hardware/aperture/images/index.html @@ -0,0 +1,2258 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Aperture Images - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Aperture Images

+

Servers

+

distro hanging cables

+

newly installed servers

+

inside of server

+

Networking

+

isengard

+

mordor and rivendell

+

labelled cables for mordor and rivendell

+

labelled cables for isengard

+

tidy cables for glados, wheatley and chell

+

Some Dancing for Good Measure

+

dancing.mp4

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/aperture/index.html b/hardware/aperture/index.html new file mode 100644 index 00000000..83da61ed --- /dev/null +++ b/hardware/aperture/index.html @@ -0,0 +1,2189 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Aperture - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Aperture

+

What is Aperture?

+

It's nothing to do with cameras. See about for more information on the hardware.

+

New Admins

+

If you're a new admin, this is a cheat sheet for you. In order to get broadly up to speed and understand the content of these pages, I suggest you read the following:

+ +

FAQ

+

So, you've hit a problem. Here are some quick links to common problems:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/aperture/johnson/index.html b/hardware/aperture/johnson/index.html new file mode 100644 index 00000000..790fb9f6 --- /dev/null +++ b/hardware/aperture/johnson/index.html @@ -0,0 +1,2273 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Johnson - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Johnson

+

Details

+

Formerly albus (in a different life)

+
    +
  • Type: Dell PowerEdge R515
  • +
  • OS: NixOS
  • +
  • CPU: 2 x Opteron 4334 6 core @ 3.2GHz
  • +
  • RAM: 32GB
  • +
  • Storage: LSI MegaRAID SAS 2108 RAID controller
  • +
  • Disks: 2 x 300GB SAS for boot, 8x 1TB SATA for ZFS
  • +
  • Drives: Internal SATA DVD±RW
  • +
  • Network: 4x Onboard Ethernet, 802.3ad bonding
  • +
  • iDRAC NIC: Shared on port 1
  • +
+

Part of aperture

+

Where to Find

+
    +
  • Internal:
      +
    • 10.10.0.7
    • +
    +
  • +
  • The 2nd NIC is currently unused; it would be a good idea to bond it with the first for more throughput and redundancy on the same IP
  • +
+

Services

+ +

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/aperture/wheatley/index.html b/hardware/aperture/wheatley/index.html new file mode 100644 index 00000000..9e10bd3b --- /dev/null +++ b/hardware/aperture/wheatley/index.html @@ -0,0 +1,2260 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Wheatley - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Wheatley

+

Details

+
    +
  • Type: Dell R6515
  • +
  • OS: Debian 11
  • +
  • CPU: AMD 7302P 3GHz, 16C/32T, 128M, 155W
  • +
  • RAM: 2x 16GB RDIMM, 3200MT/s Dual Rank
  • +
  • Storage: 4x 2TB SATA HDDs (hardware RAID)
  • +
+

Part of aperture

+

Where to Find

+
    +
  • Internal:
      +
    • 10.10.0.5
    • +
    +
  • +
  • External:
      +
    • 136.206.16.5
    • +
    • wheatley.redbrick.dcu.ie
    • +
    • wheatley.aperture.redbrick.dcu.ie
    • +
    +
  • +
+

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/azazel/index.html b/hardware/azazel/index.html new file mode 100644 index 00000000..e707a076 --- /dev/null +++ b/hardware/azazel/index.html @@ -0,0 +1,2278 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Azazel - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Azazel

+

Details

+
    +
  • Type: Dell PowerEdge R515
  • +
  • OS: Debian 12 bookworm
  • +
  • CPU: 2 x AMD Opteron 4180 @ 2.6GHz
  • +
  • RAM: 16GB
  • +
  • Storage: Dell PERC H200 Integrated RAID Controller
  • +
  • Disks: 2 x 146GB 15,000 RPM SAS in RAID 1
  • +
  • DAS: Worf
  • +
  • Drives: Internal SATA DVD±RW
  • +
  • Network: 2x Onboard Ethernet
  • +
+

Where to Find

+
    +
  • Internal:
      +
    • 10.5.0.1
    • +
    +
  • +
  • External:
      +
    • 136.206.15.24
    • +
    +
  • +
+

Services

+
    +
  • primary ssh login box for users (see Logging in)
  • +
  • jump-box for admins
  • +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/index.html b/hardware/index.html new file mode 100644 index 00000000..2e16eb6d --- /dev/null +++ b/hardware/index.html @@ -0,0 +1,2181 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Hardware - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Hardware

+

Here is a list of current hardware in Redbrick's suite of servers, switches and other bits.

+

Login Boxes

+ +

NixOS Boxes

+ +

Aperture

+ +

Network Hardware

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/network/arse/index.html b/hardware/network/arse/index.html new file mode 100644 index 00000000..74d5fb0a --- /dev/null +++ b/hardware/network/arse/index.html @@ -0,0 +1,2155 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Arse - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Arse

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/network/cerberus/index.html b/hardware/network/cerberus/index.html new file mode 100644 index 00000000..03fc5517 --- /dev/null +++ b/hardware/network/cerberus/index.html @@ -0,0 +1,2155 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Cerberus (SRX) - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Cerberus (SRX)

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/network/index.html b/hardware/network/index.html new file mode 100644 index 00000000..c8cfe533 --- /dev/null +++ b/hardware/network/index.html @@ -0,0 +1,2150 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Redbrick Network Architecture - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Redbrick Network Architecture

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/network/mordor/index.html b/hardware/network/mordor/index.html new file mode 100644 index 00000000..36e589c1 --- /dev/null +++ b/hardware/network/mordor/index.html @@ -0,0 +1,2297 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Mordor - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Mordor

+ + +

Setup

+

The firewall is set up using the personal setup type, with the elected-admins@redbrick.dcu.ie account (stored in pwsafe).

+

2FA is stored on the same device as the GitHub 2FA code.

+

Automatic Updates

+

The UDM Pro is not set up for automatic updates for reliability reasons.

+

Network Speeds

+

We have a 10Gb/s link to DCU's core.

+

Users

+

The current elected admins should all have access to the rbadmin account on the firewall. Rootholders should not have access to the firewall unless they are explicitly granted access.

+

The owner account of the UniFi equipment is rbadmins (email: elected-admins@redbrick.dcu.ie), with the password stored in pwsafe under unifi.

+

There is a "super admin" account that can be used for local access only, details are stored in pwsafe under udmpro-super-admin.

+

Updates

+

The UDM Pro should be kept up to date at all times using the web interface. Please ensure there are no breaking changes before updating.

+
+

AUTO UPDATES SHOULD NEVER BE ENABLED!

+

This is to prevent a bad update from breaking the UDM Pro and thus the entire network. If you are confident that UniFi can produce stable updates, you may turn it on; however, please let the next admins know that you have done this (and update these docs with a comment!).

+
+

Advanced Settings

+

SSH is enabled to allow for rollbacks in case of a bad update (I warned you!).

+

Remote access is disabled as it should not be needed; the admin VPN should provide enough access for you. If it is enabled in future, please update these docs with your reasons.

+

Backups

+

Backups are configured to run every week at 1am on Sunday. 20 backups are stored at a time, giving 20 weeks of configuration history. This should be plenty of time to recover from a bad configuration change.
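As a rough sketch of what that retention policy means in practice (illustrative only — the UDM Pro manages its own backups, and the dates below are hypothetical):

```python
from datetime import date, timedelta

MAX_BACKUPS = 20  # the UDM Pro keeps at most 20 weekly backups

# Hypothetical backup dates: one per week, oldest first.
backups = [date(2024, 1, 7) + timedelta(weeks=i) for i in range(30)]

# Keeping only the newest 20 leaves roughly 20 weeks of configuration history.
retained = backups[-MAX_BACKUPS:]
print(len(retained), "backups retained, oldest from", retained[0])
```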

+

External Addresses

+

Mordor is NATted when it accesses the Internet. This is because the link address between it and DCU is a private address.

+

This NATting applies only to the UDM Pro device itself, not to the 136.206.16.0/24 network; it exists solely to let the UDM box access the Internet.

+

The 136.206.16.0/24 network is routed down to the UDM Pro box within the DCU network. Essentially, there is a route in DCU's network that says "if you want to reach 136.206.16.0/24, go to mordor".
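A hedged sketch of that routing decision, using Python's `ipaddress` module for a longest-prefix match. This is purely illustrative — the real route lives in DCU's core network, and the next-hop labels below are stand-ins, not actual router configuration:

```python
import ipaddress

# Hypothetical routing table: (prefix, where matching traffic is sent).
routes = [
    (ipaddress.ip_network("136.206.16.0/24"), "next hop: mordor's uplink"),
    (ipaddress.ip_network("0.0.0.0/0"), "default: rest of DCU / the Internet"),
]

def lookup(destination: str) -> str:
    """Pick the most specific (longest-prefix) matching route, as a router would."""
    dest = ipaddress.ip_address(destination)
    matching = [(net, hop) for net, hop in routes if dest in net]
    _, hop = max(matching, key=lambda entry: entry[0].prefixlen)
    return hop

print(lookup("136.206.16.4"))   # aperture traffic is sent to mordor
print(lookup("136.206.15.24"))  # everything else follows the default route
```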

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/network/switches/index.html b/hardware/network/switches/index.html new file mode 100644 index 00000000..e81c6ca9 --- /dev/null +++ b/hardware/network/switches/index.html @@ -0,0 +1,2155 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + switches - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

switches

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/nix/hardcase/index.html b/hardware/nix/hardcase/index.html new file mode 100644 index 00000000..8244e493 --- /dev/null +++ b/hardware/nix/hardcase/index.html @@ -0,0 +1,2279 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Hardcase - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Hardcase

+

Details

+
    +
  • Type: Dell PowerEdge R410
  • +
  • OS: NixOS
  • +
  • CPU: 2 x Intel Xeon X5570 @ 2.93GHz
  • +
  • RAM: 48GB, incorrectly populated
  • +
  • Storage: LSI Logic SAS1068E "Fake" RAID controller
  • +
  • Disks: 2 x 500GB SATA disks in RAID 1
  • +
  • Drives: Internal SATA DVD±RW
  • +
  • Network: 2x Onboard Ethernet, 802.3ad bonding
  • +
  • iDRAC NIC: Shared on port 1
  • +
  • iDRAC IP is 1.158
  • +
+

Where to Find

+
    +
  • Internal:
      +
    • 192.168.0.158
    • +
    +
  • +
  • External:
      +
    • 136.206.15.3
    • +
    +
  • +
+

Services

+
    +
  • PostgreSQL
  • +
  • apache
  • +
  • monitoring
  • +
  • postfix (SMTP)
  • +
  • dovecot (IMAP)
  • +
  • mailman - mailing lists
  • +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/nix/icarus/index.html b/hardware/nix/icarus/index.html new file mode 100644 index 00000000..10ef715d --- /dev/null +++ b/hardware/nix/icarus/index.html @@ -0,0 +1,2283 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Icarus - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Icarus

+

Daedalus and Icarus were twins and thus share documentation.

+

However, Daedalus is now Deadalus, and Icarus lives on for now, albeit a little sick.

+

Details

+
    +
  • Type: Dell PowerEdge 2950
  • +
  • OS: NixOS
  • +
  • CPU: 2x Intel Xeon L5335 @ 2.00GHz
  • +
  • RAM: 32GB (Daedalus), 16GB (Icarus)
  • +
  • Storage: Dell Perc 6/i Integrated RAID controller
  • +
  • Disks:
      +
    • 2 x 73GB SAS disks in RAID 1 (hardware)
    • +
    • 3 x 600GB SAS disks in passthrough (3x RAID 0)
    • +
    +
  • +
  • Drives: Internal SATA DVD±RW
  • +
  • Network: 2x Onboard Ethernet, 802.3ad bonding
  • +
  • iDRAC NIC: Shared on port 1
  • +
+

Where to Find

+
    +
  • Internal:
      +
    • 192.168.0.150
    • +
    +
  • +
+

Services

+
    +
  • LDAP
  • +
  • NFS (a.k.a. /storage)
  • +
  • GlusterFS, eventually, or some other distributed storage to replace NFS
  • +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/nix/motherlode/index.html b/hardware/nix/motherlode/index.html new file mode 100644 index 00000000..2836a107 --- /dev/null +++ b/hardware/nix/motherlode/index.html @@ -0,0 +1,2275 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Motherlode - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Motherlode

+

Details

+

(Something should go here probably)

+

Where to Find

+ +

Services

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/paphos/index.html b/hardware/paphos/index.html new file mode 100644 index 00000000..2094db09 --- /dev/null +++ b/hardware/paphos/index.html @@ -0,0 +1,2291 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Paphos - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Paphos

+

Details

+
    +
  • Type: Dell PowerEdge R710
  • +
  • OS: Ubuntu 14.04.5 LTS 😭
  • +
  • CPU: 2 x Intel Xeon CPU E5620 @ 2.40GHz
  • +
  • RAM: 16GB
  • +
  • Drives: Internal SATA DVD±RW
  • +
  • Network: NetXtreme II BCM5709 Gigabit Ethernet
  • +
+

Where to Find

+
    +
  • Internal:
      +
    • 192.168.0.26
    • +
    • 192.168.0.4
    • +
    • 10.5.0.6
    • +
    +
  • +
  • External:
      +
    • 136.206.15.26
    • +
    • 136.206.15.52
    • +
    • 136.206.15.53
    • +
    • 136.206.15.55
    • +
    • 136.206.15.57
    • +
    • 136.206.15.58
    • +
    • 136.206.15.101
    • +
    • 136.206.15.54
    • +
    • 136.206.15.74
    • +
    • ns1.redbrick.dcu.ie
    • +
    +
  • +
+

Services

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/pygmalion/index.html b/hardware/pygmalion/index.html new file mode 100644 index 00000000..c2242144 --- /dev/null +++ b/hardware/pygmalion/index.html @@ -0,0 +1,2270 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Pygmalion - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Pygmalion

+

Details

+
    +
  • Type: Intel(R) Xeon (R)
  • +
  • OS: Debian 12 bookworm
  • +
  • CPU: 2x Intel (R) Xeon (R) E5620 2.46GHz
  • +
  • RAM: 16GB
  • +
  • Network: 4x Broadcom Corporation NetXtreme II BCM5709 Gigabit Ethernet
  • +
+

Where to Find

+
    +
  • Internal:
      +
    • 192.168.0.25
    • +
    +
  • +
  • External:
      +
    • 136.206.15.25
    • +
    • pygmalion.redbrick.dcu.ie
    • +
    • pyg.redbrick.dcu.ie
    • +
    +
  • +
+

Services

+
    +
  • secondary ssh login box for users (see Logging in)
  • +
  • jump-box for admins
  • +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/hardware/zeus/index.html b/hardware/zeus/index.html new file mode 100644 index 00000000..59bde209 --- /dev/null +++ b/hardware/zeus/index.html @@ -0,0 +1,2278 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Zeus - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Zeus

+

Details

+
    +
  • Type: Dell PowerEdge R410
  • +
  • OS: Ubuntu 18.04
  • +
  • CPU: 2x Intel(R) Xeon (R) x5570 @ 2.93 GHz
  • +
  • RAM: 32GB
  • +
  • Network: 2x NetXtreme II BCM5716 Gigabit Ethernet
  • +
+

Where to Find

+
    +
  • Internal:
      +
    • 192.168.0.131
    • +
    +
  • +
  • External:
      +
    • 136.206.15.31
    • +
    +
  • +
+

Services

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 00000000..7abbdb78 --- /dev/null +++ b/index.html @@ -0,0 +1,2235 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Home - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Home

+

Redbrick Docs

+

Welcome to Redbrick's documentation. This is where we keep up-to-date information about Redbrick's technical infrastructure.

+

This is mostly intended for admins, future admins, webmasters, and everybody else who is grumpy and has no life.

+ + +

Webgroup

+

New Admins

+

So, you want to become an admin. Brave of you. Here's some stuff you should probably read:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/js/timeago.min.js b/js/timeago.min.js new file mode 100644 index 00000000..a8530a5f --- /dev/null +++ b/js/timeago.min.js @@ -0,0 +1,2 @@ +/* Taken from https://cdnjs.cloudflare.com/ajax/libs/timeago.js/4.0.2/timeago.min.js */ +!function(s,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((s=s||self).timeago={})}(this,function(s){"use strict";var a=["second","minute","hour","day","week","month","year"];function n(s,n){if(0===n)return["just now","right now"];var e=a[Math.floor(n/2)];return 1=m[t]&&t=m[e]&&e 0) { + var locale = getLocale(nodes[0]); + timeago.render(nodes, locale); + } + }) +} else { + var nodes = document.querySelectorAll('.timeago'); + if (nodes.length > 0) { + var locale = getLocale(nodes[0]); + timeago.render(nodes, locale); + } +} diff --git a/overrides/main.html b/overrides/main.html new file mode 100644 index 00000000..3cf042d3 --- /dev/null +++ b/overrides/main.html @@ -0,0 +1,11 @@ +{% extends "base.html" %} + +{% block footer %} + +{% endblock %} diff --git a/procedures/ansible/index.html b/procedures/ansible/index.html new file mode 100644 index 00000000..8d06b94e --- /dev/null +++ b/procedures/ansible/index.html @@ -0,0 +1,2379 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Ansible - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Ansible

+

Redbrick uses ansible to manage its infrastructure. This document describes the procedures and some tips to get the most out of it.

+

Getting Started

+

Installing Ansible

+

Ansible is a Python package, so you'll need Python and pip installed first. Once pip is available, install Ansible with:

+
Bash
pip install ansible
+
+
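If pip itself isn't installed yet, it can usually be pulled in from the distro packages first (a rough sketch for Debian/Ubuntu; pipx or a virtualenv works just as well):

Bash
sudo apt update
sudo apt install python3 python3-pip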

Add an SSH Key

+

Ansible uses ssh to connect to the remote hosts. You'll need to set up your ssh key so that you can connect to the hosts without constant prompts for passwords.

+
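If you don't already have a key, a minimal sketch looks like this (the host placeholder stands for whichever machine from your hosts file you're targeting):

Bash
ssh-keygen -t ed25519
ssh-copy-id <your username>@<host>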

Create a Hosts File

+

This is used as a phonebook of sorts for Ansible. It tells Ansible which hosts to connect to and what user to use.

+
INI
[aperture]
+glados
+wheatley
+chell
+
+[aperture:vars]
+ansible_user= <your username>
+
+
+

Contact @distro for a fully populated file.

+
+

Test it out

+
Bash
ansible all -i hosts -m ping
+
+

This should connect to every host in your hosts file (including the aperture group) and run the ping module. If it works, you're good to go!

+

Playbooks

+

Ansible playbooks are a set of instructions for ansible to run. They're written in YAML, and are usually stored in a file called playbook.yml.

+

Writing a Playbook

+

Ansible playbooks are written in YAML. The basic structure is:

+
YAML
- hosts: <group name>
+  tasks:
+    - name: <task name>
+      <module name>:
+        <module options>
+
+

Example

+
YAML
- hosts: aperture
+  tasks:
+    - name: Install curl
+      apt:
+        name: curl
+        state: present
+
+

This playbook will connect to all the hosts in the aperture group, and run the apt module with the name and state options.

+

Running a Playbook

+
Bash
ansible-playbook playbook.yml -i hosts
+
+

More Information

+

Redbrick's ansible configuration is stored in the ansible folder in the redbrick/nomad repository. There's some more documentation there on each playbook.

+

Ansible's documentation is available here.

+

Common Errors

+

Hashicorp Apt Key

+

Sometimes, when running a playbook, you'll get an error like this:

+
Bash
TASK [apt : apt update packages to their latest version and autoclean] ***************************************************************************************************
+fatal: [wheatley]: FAILED! => {"changed": false, "msg": "Failed to update apt cache: unknown reason"}
+fatal: [chell]: FAILED! => {"changed": false, "msg": "Failed to update apt cache: unknown reason"}
+fatal: [glados]: FAILED! => {"changed": false, "msg": "Failed to update apt cache: unknown reason"}
+
+

This is because the Hashicorp apt key has expired. To fix this, uncomment the hashicorp-apt task in the playbook.
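For reference, that task essentially re-fetches the signing key; done by hand it looks roughly like this (a sketch based on Hashicorp's published apt instructions, not the exact task in our playbook):

Bash
wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
sudo apt update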

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/cheatsheet/index.html b/procedures/cheatsheet/index.html new file mode 100644 index 00000000..68203551 --- /dev/null +++ b/procedures/cheatsheet/index.html @@ -0,0 +1,2340 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Cheatsheet - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Cheatsheet

+

LDAP

+
    +
  • Query a user
  • +
+
Bash
ldapsearch -x uid="USERNAME_HERE"
+
+
    +
  • Query user as root for more detailed info
  • +
+
Bash
ldapsearch -D "cn=root,ou=services,o=redbrick" -y /etc/ldap.secret uid=user
+
+
    +
  • Find all users emails created by USERNAME
  • +
+
Bash
ldapsearch -x createdby="user" uid | awk '/uid:/ {print $2"@redbrick.dcu.ie"}'
+
+
    +
  • Check if something is backed up on NFS (/storage/path/to/file)
  • +
+
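For example, using the placeholder path above:

Bash
ls -lh /storage/path/to/file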

All useful LDAP scripts (edit user quota, reset user password, renew user accounts, etc) are located in the home directory of root on Azazel.

+

Log in as root on a server with local accounts:

+
Bash
ssh localaccount@redbrick.dcu.ie
+sudo -i # (same password as localaccount account)
+
+
+

Authentication/Passwords

+

Onboarding New Admins

+
    +
  • Create root ssh key for NixOS Machines
    +Following creation of the key, add it to the whitelist in the nix configs.
  • +
+
Bash
ssh-keygen -t ed25519 # Generate key
+cat ~/.ssh/id_ed25519.pub # Verify it's been created
+ssh-copy-id -i ~/.ssh/id_ed25519 user@redbrick.dcu.ie # Copy to local account's ssh dir
+ssh -i ~/.ssh/id_ed25519 user@redbrick.dcu.ie # Verify that this key was copied
+
+

Access Passwordsafe (pwsafe)

+

Location of master password vault.

+
+

Note:

+

getpw will prompt you for the Master root password.

+
+
Bash
ssh localroot@halfpint
+sudo -i # to log in as root with local user password
+pwsafe # to list passwords
+getpw <name_of_pass> # Grab a password by its name key, e.g. getpw pygmalion
+
+
+

SSH to Root on a NixOS Machine

+
    +
  • From the account you generated your ssh key on (in nix configs) type:
  • +
+
Bash
ssh root@hardcase.internal
+
+
+

NixOS

+
    +
  • Install a temporary program
  • +
+
Bash
nix-shell -p [space separated package names]
+
+
    +
  • Run brickbot2 (running on Metharme)
  • +
+
Bash
cd brickbot2
+nix-shell
+source venv/bin/activate
+python3 main.py config.toml
+
+

Brickbot runs in a tmux session (attach with tmux a -t 0) and can be restarted by pressing Ctrl+C and re-running the above python command.

+

Minecraft Servers

+

The Redbrick Minecraft servers are dockerized applications running on zeus on a server-per-container basis, using the tools from this GitHub repo: https://github.com/itzg/docker-minecraft-server#interacting-with-the-server

+

The repo is very well documented, so have a look at the README, but here are the basics:

+

NOTE: local root accounts must be added to the docker group (usermod -a -G docker ACCOUNT_NAME) before they can run the docker commands.

+

You can run docker ps | grep minec to find the docker containers running the servers.

+

The docker compose files are located in /etc/docker-compose/services; the unmodded vanilla compose file, for example, is in /etc/docker-compose/services/minecraft_unmodded/.

+

To see the configuration for the container you can do docker inspect CONTAINER_NAME_OR_ID

+
    +
  • Interacting with the Server Console
      +
    • https://github.com/itzg/docker-minecraft-server#interacting-with-the-server
    • +
    +
  • +
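As a rough day-to-day sketch (the container name here is made up - check docker ps for the real one; rcon-cli is the console tool bundled with the itzg image per its README, and whether the host uses docker-compose or docker compose depends on what's installed on zeus):

Bash
docker ps | grep minec                      # find the running Minecraft containers
docker exec -i minecraft_unmodded rcon-cli  # attach to that server's console
cd /etc/docker-compose/services/minecraft_unmodded
docker-compose up -d                        # (re)create the server from its compose file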
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/handover/index.html b/procedures/handover/index.html new file mode 100644 index 00000000..9178d15a --- /dev/null +++ b/procedures/handover/index.html @@ -0,0 +1,2211 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Handover - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Committee Handover

+

When a new committee is elected, there are many things to hand over. This is a list of those things.

+

Passwords

+

All passwords should be rotated as soon as possible, so that the old committee can no longer access Redbrick using the old credentials. The passwords are stored in Bitwarden, and the master password should be rotated first and foremost.

+

2-Factor Authentication

+

The Chair holds the 2FA key for the Bitwarden account.

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/index.html b/procedures/index.html new file mode 100644 index 00000000..f405a9c4 --- /dev/null +++ b/procedures/index.html @@ -0,0 +1,2159 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Procedures - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Procedures

+

Here you can find a list of various procedures useful for the day-to-day running of Redbrick

+

New elected admins

+

Cheatsheet

+

Admin VPN

+

Ansible

+

Post-powercut Todo List

+

NixOS

+

Updating WordPress Domains

+

IRC Ops

+

Committee Handover

+

Redbrick System Administrator Policies

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/irc-ops/index.html b/procedures/irc-ops/index.html new file mode 100644 index 00000000..fdb50cc0 --- /dev/null +++ b/procedures/irc-ops/index.html @@ -0,0 +1,2295 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + IRC Ops - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

IRC Ops

+

This is a mirror of:

+

Redbrick cmt Wiki entry

+

Channel Modes

+

It's easy to bugger up the channel with the MODE command, so here's a nice copied and pasted summary of how to use it:

+
    +
  • /mode {channel} +b {nick|address} - ban somebody by nickname or address mask (nick!account@host)
  • +
  • /mode {channel} +i - channel is invite-only
  • +
  • /mode {channel} +l {number} - channel is limited, with {number} users allowed maximal
  • +
  • /mode {channel} +m - channel is moderated, only chanops and others with 'voice' can talk
  • +
  • /mode {channel} +n - external /MSGs to the channel are not allowed
  • +
  • /mode {channel} +p - channel is private
  • +
  • /mode {channel} +s - channel is secret
  • +
  • /mode {channel} +t - topic is limited, only chanops may change it
  • +
  • /mode {channel} +o {nick} - makes {nick} a channel operator
  • +
  • /mode {channel} +v {nick} - gives {nick} a voice
  • +
+

Other Commands

+

Basically what you'll be using is:

+
    +
  • To kick someone: /kick username
  • +
  • To ban someone: /mode #lobby +b username
  • +
  • To set the topic: /topic #lobby whatever
  • +
  • To op someone: /mode #lobby +o someone
  • +
  • To op two people: /mode #lobby +oo someone someone_else
  • +
+

Or:

+
    +
  • To kick someone: /k username
  • +
  • To ban someone: /ban username
  • +
  • To unban someone: /unban username
  • +
  • To set the topic: /t whatever
  • +
  • To op someone: /op someone
  • +
  • To op two people: /op someone someone_else
  • +
  • To deop someone: /deop someone
  • +
+

Sysop Specific Commands

+

These commands can only be run by sysops (i.e. admins in the ircd config file).

+
    +
  • Enter BOFH mode (required for all sysop commands): /oper
  • +
  • Peer to another server*: /sconnect <node name>
  • +
  • Drop a peer with another server: /squit <node name>
  • +
  • Force op yourself (do not abuse): /quote opme <channel name>
  • +
  • Barge into a channel uninvited (again, do not abuse):/quote ojoin #channel
  • +
  • Barge into a channel uninvited with ops (same again): /quote ojoin @#channel
  • +
  • Force someone to join a channel: /quote forcejoin nick #channel
  • +
  • Kill someone: /kill <username> <smartassed kill message>
  • +
  • Ban someone from this server: /kline <username> (there may be more params on this)
  • +
  • Ban someone from the entire network: /gline <username> (there may be more params on this)
  • +
+

(thanks to atlas for the quick overview)

+
    +
  • Don't try to connect to intersocs. Due to crazy endian issues or something, they have to connect to us.
  • +
+

Bots

+

So many bots are now 'littering' #lobby that it has become a slight problem; anyone wishing to add a new bot to the channel must request permission from the Committee. The main feature wanted is a time limit on bot commands.

+

Services

+

The IRC services are run by Trinity for all the netsocs. The two services are:

+

NickServ and ChanServ.

+
    +
  • /msg NickServ HELP
  • +
  • /msg ChanServ HELP
  • +
+

for more details.

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/new-admins/index.html b/procedures/new-admins/index.html new file mode 100644 index 00000000..ff0e9f19 --- /dev/null +++ b/procedures/new-admins/index.html @@ -0,0 +1,2243 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + New Elected Admins - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

New Elected Admins

+

The chronological process of becoming an admin usually looks very similar each year. There are some important things you should know.

+

Remember, being a SysAdmin for the society is not a job, it is a voluntary task you sign up for - don't stress yourself out over it, have fun, and hopefully learn a thing or two. : )

+

Process

+

Admin Exam

+

Anyone wishing to run and be elected as a SysAdmin must complete a technical exam as an assessment of their knowledge and competency in solving some of the many problems that will be thrown at them.

+

You can find some archives of past exams here; however, note that these vary from year to year, as they are created each year by the currently elected admins.

+

Election at AGM

+

At the annual general meeting, you may nominate yourself, or have someone nominate you to run for SysAdmin. You may only run if you have passed the Admin exam.

+

The number of admins per year is usually three; to be elected, you must be among the top three voted candidates.

+

Onboarding

+

If you are successfully elected - congrats! We welcome you to this pain joy filled journey :)

+

After being elected it is your time to learn the ropes and become familiar with the technicalities of Redbrick.

+

Not alone of course! The previous Admins will assist you on this journey and be there to answer any of your questions, along with this documentation.

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/nixos/index.html b/procedures/nixos/index.html new file mode 100644 index 00000000..93c741f0 --- /dev/null +++ b/procedures/nixos/index.html @@ -0,0 +1,2261 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + NixOS - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

NixOS

+

Familiarise yourself with the layout of the following. Bookmarking the page is also a good shout.

+

NixOS documentation

+

Who is NixOS and what Does He Do

+

NixOS is a Linux distribution focused on being a config-first operating system for running services. The advantages of such an approach are the following:

+
    +
  • Files dictate how an installation is set up, and as such, can be versioned and tracked in your favourite VCS.
  • +
  • New configs can be tested, and safely rolled back.
  • +
  • Can be used for both physical and virtual machines in the same way.
  • +
+

Further reading on this can be found on the about page.

+

Being an Admin: NixOS and You

+

There's a couple of things you'll need to do before you get started with NixOS:

+ +

Depending on the powers that be, some sort of normal PR contribution will be acceptable: if you have access, a branch is appropriate; in all other cases, make a fork and PR back to Redbrick's repo. This will be decided case by case for those of you reading.

+

Here's a quick hit list of stuff that's also worth bookmarking as you work with Nix:

+ +

Nix is pretty small as an OS, so setting up a node for yourself, either as a home server or as a VM, is a solid way to practice how stuff works in a real environment, and it lets you work independently of Redbrick. A service you configure at home should be able to run on Redbrick, and vice versa.

+

Getting Set up to Start Deploying Stuff

+
    +
  • +

    The first step is to navigate to the ssh service config in the nix-config repo here.

    +
  • +
  • +

    Make a pull request asking to add the PUBLIC KEY of your ssh key pair to the config file (see the key-generation sketch after this list).

    +
      +
    • The best thing to do is to copy the previous line and modify it to contain your details instead.
    • +
    • At the time of writing, you are expected to generate an ssh-ed25519 key. This is subject to change with new cryptographic standards.
    • +
    +
  • +
  • Once this is done, contact one of the currently set up users to pull and reload the given machines and you'll have access right away using the accompanying key.
  • +
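A minimal sketch of generating that key pair (the comment string is just an example label):

Bash
ssh-keygen -t ed25519 -C "<your username>@redbrick"
cat ~/.ssh/id_ed25519.pub   # this is the public key that goes into the config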
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/open-governance-tagging/index.html b/procedures/open-governance-tagging/index.html new file mode 100644 index 00000000..894f8e7e --- /dev/null +++ b/procedures/open-governance-tagging/index.html @@ -0,0 +1,2461 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Open Governance Tagging - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Open Governance Tagging - hypnoant, wizzdom

+

1. Before the Tagging Ceremony

+

Generating the Key

+

To tag the Open Governance repo you will need to make a new PGP key on behalf of the Redbrick committee. Below are the commands and inputs for creating this key.

+
Bash
gpg --full-generate-key
+
+
Key Generation Menu
Please select what kind of key you want:
+   (1) RSA and RSA
+   (2) DSA and Elgamal
+   (3) DSA (sign only)
+   (4) RSA (sign only)
+   (9) ECC (sign and encrypt) *default*
+  (10) ECC (sign only)
+  (14) Existing key from card
+Your selection? 1
+
+RSA keys may be between 1024 and 4096 bits long.
+What keysize do you want? (3072) 4096
+
+Please specify how long the key should be valid.
+         0 = key does not expire
+      <n>  = key expires in n days
+      <n>w = key expires in n weeks
+      <n>m = key expires in n months
+      <n>y = key expires in n years
+Key is valid for? (0) {SET FOR DATE AFTER TAGGING CEREMONY}
+
+Key expires at {DATE AFTER TAGGING CEREMONY} IST
+Is this correct? (y/N) y
+
+GnuPG needs to construct a user ID to identify your key.
+Real name: Redbrick Committee
+Email Address: committee@redbrick.dcu.ie
+Comment: Redbrick Committee (Redbrick Open Governance {YEAR-MONTH-TYPE_OF_MEETING(AGM/EGM)})
+
+Change (N)ame, (C)omment, (E)mail or (O)kay/(Q)uit? O
+
+

First Sign

+

The signatory who has generated the key will then sign this key.

+
Bash
gpg --sign-key {REDBRICK KEY-ID}
+
+

You will then publish this public key to a key-server (e.g. keyserver.ubuntu.com or keys.openpgp.org).

+
Bash
gpg --keyserver keyserver.ubuntu.com --send-key committee@redbrick.dcu.ie
+
+

Second Sign

+

The other signatory will pull the key from the key-server and will then sign this key and re-publish the key to the key-server. (You can use the more secure method below for general membership if you wish).

+
Bash
gpg --keyserver keyserver.ubuntu.com --recv-key {REDBRICK KEY-ID}
+
+gpg --sign-key {REDBRICK KEY-ID}
+
+gpg --keyserver keyserver.ubuntu.com --send-keys {REDBRICK KEY-ID}
+
+

To verify this procedure has worked and that both signatories have signed it, we will have the first signatory pull the key back down and verify the signatures.

+
Bash
gpg --keyserver-options no-self-sigs-only --keyserver keyserver.ubuntu.com --recv-key {REDBRICK KEY-ID}
+
+

General Membership Sign

+

The society now has the option, if the current committee wishes, to publish this key to the general membership so that they can also sign it. The committee will have to provide an email address or another service for the general membership to send files to.

+

Below is the process for a member of the general membership to sign the key.

+
Bash
gpg --recv-keys {REDBRICK KEY-ID}
+gpg --sign-key {REDBRICK KEY-ID}
+gpg --armor --export {REDBRICK KEY-ID} | gpg --encrypt -r {REDBRICK KEY-ID} --armor --output {REDBRICK KEY-ID}-signedBy-{OWNER KEY ID}.asc
+
+

They will then send this file to the signatories.

+

The signatories will then use the following commands to import and publish their key with the new signature. This must be done before the tagging ceremony.

+
Bash
gpg -d {REDBRICK KEY-ID}-signedBy-{OWNER KEY ID}.asc  | gpg --import
+gpg --send-key {REDBRICK KEY-ID}
+
+

2. During the Tagging Ceremony

+

The first signatory shall tag the repository with the following command and styling. There shall be at least 2 witnesses, listed in the tag message and separated by commas.

+
Bash
git tag -as {YYYY-MM-TYPEOFMEETING} {COMMIT ID}
+
+
Git Tag Message
Co-authored-by: {Signatory 2}
+
+Witnessed-by: ~{WITNESS}
+
+See `knowledge/tagging.md` for more info.
+
+

They can then push this tag to GitHub:

+
Bash
git push --tags origin
+
+

3. After the Tagging Ceremony

+

Verifying the Tag

+

Clone the git repository

+
Bash
git clone https://github.com/redbrick/open-governance.git
+
+

View the tag

+
Bash
git tag -v {YYYY-MM-TYPEOFMEETING}
+
+

Import the key

+

There should be a key signature at the bottom of the tag view. This should be imported into your key-ring. There may be a separate key-server used for the given year's key, so verify with the committee that it is on the correct server before importing.

+
Bash
gpg --keyserver-options no-self-sigs-only --keyserver keyserver.ubuntu.com --recv-key {REDBRICK KEY-ID}
+
+

Verify the tag

+
Bash
git tag -v {YYYY-MM-TYPEOFMEETING}
+
+

Check the signatories

+
Bash
gpg --list-sigs {REDBRICK KEY-ID}
+
+

Import the signatories' keys

+
Bash
gpg --list-sigs {REDBRICK KEY-ID} --keyid-format long | grep 'ID not found' | perl -nwe '/([0-9A-F]{16})/ && print "$1\n"' | xargs gpg --keyserver-options no-self-sigs-only --keyserver keyserver.ubuntu.com  --recv-keys
+
+

Export their key

+
Bash
gpg --export -a {SIGNATORY KEY-ID}
+
+

Their key should be available at their GitHub under https://github.com/{USERNAME}.gpg

+

Externally Hosted Repos

+

Uploading the Repo

+
    +
  • First verify that the repo is correctly tagged and signed following the previous steps.
  • +
  • Download the zip of the tag from the GitHub webpage (or clone the repo, check out the tag and zip the folder).
  • +
  • Sign the Zip and verify it:
  • +
+
Bash
gpg --sign {NAME OF ZIP}.zip
+gpg --verify {NAME OF ZIP}.zip.gpg
+
+
    +
  • Export public key:
  • +
+
Bash
gpg --export -a {KEY-ID} > {MYKEYID}
+
+
    +
  • Upload the .zip.gpg file and your public key
  • +
+

Users Verifying the Hosted Zip

+
Bash
gpg --import {KEYID}
+gpg --verify {NAME OF ZIP}.zip.gpg
+
+
    +
  • Extracting the original zip file:
  • +
+
Bash
gpg --output {NAME OF ZIP}.zip --decrypt {NAME OF ZIP}.zip.gpg
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/policies/index.html b/procedures/policies/index.html new file mode 100644 index 00000000..313e5e5e --- /dev/null +++ b/procedures/policies/index.html @@ -0,0 +1,2311 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Systems Administrator Policies - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Redbrick System Administrator Policies

+

The purpose of this is to brief new Redbrick system administrators on the current setup, policies and practices in place and to serve as the place to record all such information for current and future administrators.

+

Admin Account Privileges

+
    +
  • By default, all admin accounts will remain the same as the rest of the committee.
  • +
  • Each admin will receive a local account on each machine that will be in the root group. This allows you to log on if LDAP goes down.
  • +
  • Accounts should not be placed into any other 'system' or privileged groups (e.g. pgSQL, mail, news, etc.) but by all accounts (hah, bad pun!) can be placed into useful groups (e.g. cvs, webgroup, helpdesk, etc.)
  • +
+

Root account

+

When su'ing to root, please observe the following:

+
    +
  • Wait for the password prompt before typing in the password! Sometimes lag/terminal freezes or whatever can kick in. The other classic mistake is typing the password in place of the username (say for a console login).
  • +
  • Make sure LOGNAME is set to your UNIX name. The Linux boxes will prompt you for this. On OpenBSD you can use 'su -m' to keep the environment.
  • +
  • Don't change the root account/finger information!
  • +
  • If you wish to use another shell, place customisations in your own file. For bash, /root/.bash_profile.<USERNAME> and for zsh /root/.zshrc.<USERNAME>.
  • +
+

/root/.zshrc and /root/.bash_profile source in the appropriate file as long as $LOGNAME is set right (see above). Do not put personal customisations into the default root account setup; remember other people have to use it.

+

Common aliases can be put in /root/.profile; familiarise yourself with the existing ones, as they can come in handy.

+
    +
  • Please keep /root tidy. Don't leave stuff strewn about the place!
  • +
  • Make sure to constantly check permissions and ownership on the files you work on, especially files with important or sensitive information in them (e.g. always use cp -p when copying stuff about).
  • +
  • Only use the root account when absolutely necessary. Many admin tasks can be done or tested first as a regular user.
  • +
+

Gotchas

+

Couple of things to look out for:

+
    +
  • killall command, never ever use it!
  • +
  • Alias cp, mv & rm with the -i option (see the sketch after this list).
  • +
  • If you're ever unsure, don't! Ask another admin or check the docs.
  • +
  • Always always double check commands before firing them off!
  • +
+
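As a sketch, the interactive-prompt aliases from the list above could look like this (per the root account section, they would normally live somewhere like /root/.profile):

Bash
alias cp='cp -i'
alias mv='mv -i'
alias rm='rm -i'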

Admin Mailing Lists

+

lists.redbrick.dcu.ie (Postorius)

+
    +
  • All accounts in the root group must be on the admin mailing list and vice versa. Admins who leave/join the root group must be added/removed from the list respectively.
  • +
  • Elected Admins should also be on the elected-admins list. This address is mainly used for mail to PayPal, user renewals, registration, and general administration tasks.
  • +
  • It is the responsibility of the Elected Admins to ensure that all mailing lists (committee, helpdesk, webmaster, elected-admins, admins, etc) are all up-to-date.
  • +
+

Admin Account Responsibilities

+

As an administrator, your new local account has extra privileges (namely being in the root group).

+

For this reason, you should not run any untrusted or unknown programs or scripts.

+

If you must, and the source code is available, you should check it before running it. Compile your own versions of other users' programs that you use regularly. It is far too easy for other users to trojan your account in this manner and get root.

+

Do not use passwordless ssh keys on any of your accounts. When using an untrusted workstation (i.e. just about any PC in DCU!) always check for keyloggers running on the local machine and never use any non-system or non-personal copies of PuTTY/ssh - there's no way of knowing if they have been trojaned.

+

General Responsibilities

+

Look after and regularly monitor all systems, network, hardware and user requests (ones that fall outside of helpdesk's realm, of course!).

+

Actively ensure system and network security. We can't police all user accounts and activities, but basic system security is paramount! Keep up to date with bugtraq/securityfocus etc. Regularly check system logs, process listings, network connections, disk usage, etc.

+

Downtime

+

All downtime must be scheduled and notified to the members well in advance by means of motd & announce. If it's really important, a mail to announce-redbrick and socials post may be necessary.

+

All unexpected/unscheduled downtime (as a result of a crash or as an emergency precaution) must be explained to the members as soon as possible after the system is brought back. A post to announce, notice in motd or possibly a mail to committee/admins is sufficient.

+

When performing a shutdown, start the shutdown 5 or 10 minutes in advance of the scheduled shutdown time to give people a chance to logout. It may also be useful to disable logins at this stage with a quick explanation in /etc/nologin.

+
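A sketch of what that might look like (the timing and messages are only examples):

Bash
echo "Down for scheduled maintenance, back shortly" > /etc/nologin   # stop new logins
shutdown -h +10 "Going down for scheduled maintenance in 10 minutes"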

Documentation

+

Please read all the documentation before you do anything, but remember that the docs aren't complete and are sometimes out of date. Please update them as you go :D

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/post-powercut/index.html b/procedures/post-powercut/index.html new file mode 100644 index 00000000..21d5a234 --- /dev/null +++ b/procedures/post-powercut/index.html @@ -0,0 +1,2207 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Post-powercut Todo List - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Post-powercut Todo List

+

A list of things that should be done/checked immediately after a power cut:

+
    +
  • Ensure the aperture servers have the correct IP addresses:
      +
    • eno1 should have the internal IP address (10.10.0.0/24) - this should be reserved by DHCP on mordor
    • +
    • eno2 should have no IP address
    • +
    • br0 should have the external IP address (136.206.16.0/24) - this should also be reserved by DHCP on mordor
    • +
    +
  • +
  • If the bastion-vm fails to start, check:
      +
    • /storage is mounted rw on each aperture server
    • +
    • br0 is present and configured on each aperture server
    • +
    • vm-resources.service.consul is running and http://vm-resources.service.consul:8000/bastion/bastion-vm-latest.qcow2 is accessible
    • +
    • if the latest symlink points to a corrupted image, ln -sf it to an earlier one
    • +
    +
  • +
  • All the nixos boxes rely on DNS for LDAP and NFS:
      +
    • Make sure bind is running on paphos
    • +
    • mount /storage
    • +
    • systemctl restart httpd, php-fpm-rbusers-* and ldap
    • +
    +
  • +
  • Apache on hardcase sometimes tries to start before networking is finished starting. To fix it, disable/re-enable it a few times. This usually makes it turn on.
  • +
  • Mailman on hardcase has a lock file at /var/lib/mailman/lock/master.lck. If it doesn't shut down correctly, this lock file will block mailman from starting up. Remove it with:
  • +
+
Bash
rm /var/lib/mailman/lock/master.lck
+
+
    +
  • paphos is old and sometimes its time will become out of sync. To make sure its time is accurate, run:
  • +
+
Bash
sudo service ntp restart
+
+

and ensure you have the correct time with date
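As a final sanity check of the interface addressing described at the top of this list, something like the following (a sketch, run on each aperture box) shows every interface and its addresses at a glance:

Bash
ip -br addr show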

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/update-wp-domain/index.html b/procedures/update-wp-domain/index.html new file mode 100644 index 00000000..6416fd67 --- /dev/null +++ b/procedures/update-wp-domain/index.html @@ -0,0 +1,2231 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Update a WordPress Domain - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Update a WordPress Domain - wizzdom, distro

+

Redbrick hosts a variety of services and websites for various clubs and societies in DCU. Oftentimes these websites run on WordPress due to its ease of use.

+

However, what happens when you no longer have access to the domain? You can change the domain on the webserver; however, WordPress will still redirect you to the old domain. In this case you must update the database to change the domain. This happened with TheCollegeView in 2023; you can read more about that here

+

SQL Commands

+
+

BACKUPS!!!

+

Ensure you have a recent backup of the database by checking /storage/backups

+
+
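If you'd like a fresh dump immediately before making changes, something along these lines will do (the database name is a placeholder; substitute the real WordPress database):

Bash
mysqldump <database name> > wp-backup-$(date +%F).sql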
    +
  • First, check what the current value is
  • +
+
SQL
-- validate current setting
+select option_name,option_value from wp_2options where( option_name="siteurl" or option_name="home");
+
+
    +
  • Now, update the option with the new value
  • +
+
SQL
-- update to new value
+update wp_2options set option_value="http://www.thecollegeview.redbrick.dcu.ie" where( option_name="siteurl" or option_name="home");
+
+
    +
  • Verify that the new value is set correctly
  • +
+
SQL
-- validate new value
+select option_name,option_value from wp_2options where( option_name="siteurl" or option_name="home");
+
+
    +
  • Now, the same again but for the post content and guid
  • +
+
SQL
-- update post content with new domain
+update wp_2posts set post_content = replace(post_content,"://www.thecollegeview.com/","://thecollegeview.redbrick.dcu.ie/");
+
+-- update the guid with the new domain
+update wp_2posts set guid = replace(guid,"://www.thecollegeview.com/","://thecollegeview.redbrick.dcu.ie/");
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/procedures/vpn/index.html b/procedures/vpn/index.html new file mode 100644 index 00000000..5bc53079 --- /dev/null +++ b/procedures/vpn/index.html @@ -0,0 +1,2241 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Admin VPN - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Admin VPN

+

The admin VPN is set up to allow admins to access the network from outside of DCU, giving them an IP address on the internal network for troubleshooting, testing and integrating.

+

If you just want to create a new client configuration, go here: adding a new client

+

Setup

+

OpenVPN was installed on glados using this script.

+

Adding a New Client

+

To add a new client, run the following command (as root) on Glados:

+
Bash
bash /root/ovpn/openvpn-install.sh
+
+

You will be prompted to add a new client; enter a name for the client and the script will generate a new client configuration.

+

It will be saved in /root/[client name].ovpn.

+

Revoking a Client

+

To revoke a client, run the following command (as root) on Glados:

+
Bash
bash /root/ovpn/openvpn-install.sh
+
+

You will be prompted to revoke a client; enter the name of the client you want to revoke.

+

Connecting to the VPN

+

To connect to the VPN, you will need to download the client configuration file from glados and then import it into your OpenVPN client.
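As a rough example from a Linux client (glados here stands for however you normally reach it over SSH, and the filename matches wherever the script saved the config):

Bash
scp root@glados:/root/<client name>.ovpn .
sudo openvpn --config <client name>.ovpn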

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/res/cables-glados-wheatley-chell.jpg b/res/cables-glados-wheatley-chell.jpg new file mode 100644 index 00000000..40299a7f Binary files /dev/null and b/res/cables-glados-wheatley-chell.jpg differ diff --git a/res/chell.png b/res/chell.png new file mode 100644 index 00000000..8cfbeedf Binary files /dev/null and b/res/chell.png differ diff --git a/res/chell.txt b/res/chell.txt new file mode 100644 index 00000000..c51b0360 --- /dev/null +++ b/res/chell.txt @@ -0,0 +1,62 @@ + . .. + :-+*+:..:. + :-+@%#=...:. + :#+*%+=- .. . + +%%##%%+=-:- + =#***#***+++ + .=++-. .*******+::.... + +%%%%%%****++++=..:::... . .. + -%%%%##%%%*####**: .:-====-= ..:-==-. . + =#%%###*+*%%*%#####%+.-*%@@@@@@@@=:-==+:-%@@@@@*: + :#%%###*===+##%@%%###%@@@@@@@@@@@@@=--::.:@@%@#*%@@*: + =%%###*+=-=+*##@@%%%@@@@@@@@@@@@@@@= ... :%@%#*-=*#+-. + .*%%%%%#######%@@@@@%@@@@@@@@@@@@@##- .....+*++****+- + *####%%%%#####%@@@@@%*#%@@@@@@@%#+. :+==++=-. + -+++***++++++=+**####==--===-::...:-++=:.=+*==-. + ..:+++=====+**#%@##*=* .-+**####**+. + ****#******#%%@%@@= :-+***+- + #*****###**######- ::: + .+#*++++**#######= + :=+=====+******###* + =**+===---==++++***+ + +#***++++======+**+ + *#****+=++++=-====- + =#****++=-:::::-=++==-:. + #****++-::-==+++++++++**=-.::-==: + :*++++=::-=+++++++++++++++++*+=--. + .++=-::-========++++++++++===++==: + ==:.::::------=============++===+=: + : ..::::-----+###*+::====== + ....::::.. .=**+=-==:.::----: + .. ..::::.. :=++*===--==:..::::. + . .:::=*#*:.=#==--:--. + :+#@#*#+:.=+=-: + .:+%%%#+#**-..--=+ + -++*#%@%#%#%+===:::::--== + :%@@@@%%%%###*-:-:::::::-=+= + +@%*%%#####*=: .-:::::::-=+. + -@@*#%%%%##*: :-:::::::-=- + =@@*#%%###+. :-:::::---= + -#@@@%%#+. --::::----- + .=+#%@* .-:::--===: + .=: .==-:---== + *@%*:::-=: + #%%@*--==+. + .#%%%%+:-=++ + *%%%%+=++*- + =*###+----. + -*##+--+* + =#%%#**+ + :#%%%%%*: + .#%%%@@@: + .-+##%@%- + ..#%%%@*: + ..=@%%%@@%= + ::%%%#%%@@%*++#-.. + ..+@%##%%%%@@%+. + :***++*#%*-. + :- + + + + diff --git a/res/distro-hanging-cables.jpg b/res/distro-hanging-cables.jpg new file mode 100644 index 00000000..01cddc88 Binary files /dev/null and b/res/distro-hanging-cables.jpg differ diff --git a/res/favicon.png b/res/favicon.png new file mode 100644 index 00000000..781d2765 Binary files /dev/null and b/res/favicon.png differ diff --git a/res/glados.png b/res/glados.png new file mode 100644 index 00000000..eebaa5a7 Binary files /dev/null and b/res/glados.png differ diff --git a/res/glados.txt b/res/glados.txt new file mode 100644 index 00000000..ec91dde9 --- /dev/null +++ b/res/glados.txt @@ -0,0 +1,62 @@ + .:.. . .. + .::. .. .. . + .... . :. + .. . .:. + .:. + .:. + . :: + .. :: + . :: . + :. + :. . . + :. . .-*%*==*#####**=-:. + .. . =##%%###%%#%%%%%%%%%#*=:. + .. .. . :###%##########%##%%%%%%%%*. + .. . +*#######################%%+ + . .. . .*++###%####################+- + ... .. ===#%%%@%%#############*****=-. + .. .. .*+*###%%%%%###**************=-: + ... ..:. .. . =*+*******+++++*#***********+--. + .. ..-: ... . :-+***+-:. ......:-+**+++++++=--. + .. -.... .:+++++: .. . :++++++++-:: + . . ...:. .++++-. .==++++=-:: + .. =*++=: .-=-: .-=====-::. + .. ..... .......... .. .:++++-. . -%%%#- . :-=====-:: + .. . ..: .... ... .:. .:-*++=: . .=##*-. .:----=-::. + .. :. .. .. . ::++++-. .------::: + ... .. .::--=++++: ... .. .:.-+++=: . :------::: + ....... -##******+= ... .: .:.==+=:. .:::---:::. + .::..... .**++++++++. ... . :===-. ...... .::::::::: + .......=++++++===: ... -===: .:::::::::. + . 
:+========- .. . :===-. .::::::::. + .. =========-:........ . --=-: ... ..::::::::. + -++=====---. ...... .. .=--:. . ..:::::::. + . :++++++===- ..... ----:. ...:::::: + . .----::::-----: . .---:::.. ....:::... + .. ..++++=--::::::...... ............ ::-::::::... . ...::::.. + .. ...:==++====--------::::.....::::::.:.....:::::::::::....... .....::::.. + ... .====++++========:::::..:::::---::::::.:::..:::::::::::::...:...:::... + . ... :===============--::::.::::----------:..:........:::::::...:.....:... + .. . :===============---:::::::----:::::::::.............:............... + .. :===========-------:::::::. ........................... + ... --------------------::::: :::..................... + .. --------------------:::: .:::::::::............. + . .::::::::::::::::::::::::. ::::::::::::::. + .. .. .:::::::::::::::::::::::::. .:::::::::::::. + :::::::::::::::::::::..:::::. .::.:::::::::::. + .:::::::::::::::::::::..-::::::::::::::::..:::::::::.. + ..:::::::::::::::::::.:::::::::::::::::...:::.:.... + ...:::::::::::.::::...:::::::::::::::::.......... + .........:::..:.....:::::::::::::::::......... + ...................:::::::::::::::......... + ................::::::::::::::::...... + ............::::::::::::::.... + .......:::::::::...... + ........:......... + .............. + ....... + + + + + diff --git a/res/ingress-topology.png b/res/ingress-topology.png new file mode 100644 index 00000000..ff288fbb Binary files /dev/null and b/res/ingress-topology.png differ diff --git a/res/inside-of-server.jpg b/res/inside-of-server.jpg new file mode 100644 index 00000000..6d2251e4 Binary files /dev/null and b/res/inside-of-server.jpg differ diff --git a/res/isengard.jpg b/res/isengard.jpg new file mode 100644 index 00000000..059142ca Binary files /dev/null and b/res/isengard.jpg differ diff --git a/res/johnson.png b/res/johnson.png new file mode 100644 index 00000000..24f28311 Binary files /dev/null and b/res/johnson.png differ diff --git a/res/labelled-cables-for-isengard.jpg b/res/labelled-cables-for-isengard.jpg new file mode 100644 index 00000000..6178786a Binary files /dev/null and b/res/labelled-cables-for-isengard.jpg differ diff --git a/res/labelled-cables-for-mordor-and-rivendell.jpg b/res/labelled-cables-for-mordor-and-rivendell.jpg new file mode 100644 index 00000000..15285e62 Binary files /dev/null and b/res/labelled-cables-for-mordor-and-rivendell.jpg differ diff --git a/res/logo.png b/res/logo.png new file mode 100644 index 00000000..2f0aff02 Binary files /dev/null and b/res/logo.png differ diff --git a/res/mordor-and-rivendell.jpg b/res/mordor-and-rivendell.jpg new file mode 100644 index 00000000..7aa03748 Binary files /dev/null and b/res/mordor-and-rivendell.jpg differ diff --git a/res/network-divorce.png b/res/network-divorce.png new file mode 100644 index 00000000..6483ac92 Binary files /dev/null and b/res/network-divorce.png differ diff --git a/res/new-installed-servers.jpg b/res/new-installed-servers.jpg new file mode 100644 index 00000000..63974c03 Binary files /dev/null and b/res/new-installed-servers.jpg differ diff --git a/res/wheatley.png b/res/wheatley.png new file mode 100644 index 00000000..b1c03640 Binary files /dev/null and b/res/wheatley.png differ diff --git a/res/wheatley.txt b/res/wheatley.txt new file mode 100644 index 00000000..707570fb --- /dev/null +++ b/res/wheatley.txt @@ -0,0 +1,63 @@ + ....... .:. + ...:..:.:.::..:::::::..:=--:-::... ...::=::- + ..::-----:.:::...................=++++++++++++***-::=::=. + .:--==---::::........ .. . .... .... .. 
..:::--=-=---:...:.:. + .-===--::...... :.::.: + :=*#=:.... .:.:.. + .... ..-+#*++-:. :..:.: + -.-=-+****+=:. .:.:.:. + ..-:.--::. ....... :.:... + :. .. ....:.:----=********+++==-::........ .:::.:. + :. ... :=+***+-:+*########*****+++==--==-...:-===-:::.:..: + : .. .-+*#%##**+==+**************++==-:-+=-============::=..-. + .: . ::::-*%%##*++=::-==========+========-::-*+=======----=:-=++::-= + :. .. .-=:-+*#*+=:::.:-==-===++++==--=====---:-=*+-==--===-:.-*=====::-=: + :. ::. :==::=*+=:...-=+***#+-###*##*#****-==+++=--**=============-=-::.::--==: + : .:.. .=+-:=+=:..:=+#%%%%##*:*%####***###+-+=++=++=*===+============---:...-====: + .: ...:.-*#++==-..-+###%%%%%**#=-%%#########*--+*+++++*#====================---=======: + :.::-=#%#++=:..=*##%#%%%%%%##*.+#####%%####=:+**+++**+*+-+=+=======================--=- + :.. *@%+=:.:+#%%%%%%%%#*+====:=====+++*###+-=++*****++==#*============++++========--:::. + ..*@@#=..+%%%%%%%%*+=--=+*#%%%%%###***++***+====++=====-*#+=========++++++=======--:::::. + .%@@#- :*@%%%@@%+-:=*#%@@@@%%%%%%%######****#***+++++++==+#+=======+++++++======--::::..:: + :-*@@+. +%#%@@@%+::+%@@@@@@@%%%%%%%#########*+*#*+++++++++==+#+=======+++++++=====-:::: ..:: + .-+@@+..#@@@@@@*-:+%@@%%@@@@@%%%%%###########****##++++++++++=+#+=======++++++=====-:::. ..:. + .++@@+..*@@@@@%=.-%@@@@@@@@@@%%%%####**************##+++++++++==+#+=++++++++++======-:::. ..:: + -@@@+.=%%@@@@%-.+@@@@%%##**+==-------::::::::::::::-##+++++=====++#+++++++++========-::: ..: + .%@@*.-@@@@@@%=.-*++=-::::: ...:::::---::::::.. .=%+=++=======-**=++++++++======--::. ..:. + #@@%-.#@@@@@@+:::..::...::. ..:::--==++++==--::::...:#*=+++=====-:=#+++++++++=======-::. ...: + %@@+.:#@@@@@%:::.:--..:::. ..:::-=+++****+++=-::::...+#+++++====---#+++++++++=======-::.. ..:. + -@@@:.-#@@@@@*:::::-:.:::: .:::-=++**###**+++=-:::.. =#+++++===--::**+++++++=======-:::. ..:. + =@@#=:-%@@@@@=::.::-..:::. ..::--=+**#%%##**++=-::....+*+++++=====-:+#+++++++======--:::. :::: + *@@*::+@@@@@@+::.:--.::::. ..::--=++**##***+++=-::....**++++++==+=-.=#++++++++++===--:::. .::: + .#@@*:.=%@@@@@*:::::-.-:::: ..::--==++****+++==-::... -#*+++++++++-- +#++++++++++====-:::: ..--- + :#@@#-:-#%@@@@%:::.:-::-.::. .::::--==+++++==-::::.. :#*+*+++++++=:: +#+++++++++++====-::: . :==: + .*@@#-:.*@@%@%#=:::::-. ::::. .:::::-------:::::.....*#+**+++++++-: #*+++++++++++====-::: . .==- + =@@#+:-*##%@+-*=::::--..::::. ..:::::::::::::.....-*#***+++++++--. :%++++++++++++====-:::. .===. + =@@@#-:##%%@%*+*=::::::. ...::. ..............::=#*****++++++--. +#++++++++++++====-:::. :---. + .%@@@*:=%###%#=*%+-::=++=-=======--========++++++*#***++++++++--. -#++++++++++++++===--:::. .::: + *@@@@+:***%%%@#-+%=-=*%@%%%%%%#######**#*******#******++++++--. .**=+++++++++++++====-:::. .::: + -@@@%%+-*##%%%%%%@@%+--+*#%%%%%%%%%##****+++*#********++++=-: +#=++++++++++++++====-:::. .::: + *@@@%@+-+*%##%@@@@@@%#*+==+****#*#**+++++*************++=-: .+#++++++++++++++++====-:::: .::: + %%@@@@*==+*%%@@@@@@@@@%%##****-:*******************++=-:. .. +#+++++++++++++++++=====-:::. .::. + -@%@@%@++==+#%%*%@@@%%%%%%%%%%*.*#+===+**********++=-:::.. .*#+=+++++++++++++++++=====::::. .::: + =@%%%#--*+==***%@%%%%%%%%%%%%%:-+:+*=:+*******+==--:-*- -**==++++++++++++++++++=====-::::.::-. + +%%%@=.-=++==+*#%%%%%%%%%%%#%=.+=-===****++==----::--:.+#+===+++++++=====+++=========-::::::. + +%%%##-.-=+++==+++**######%%#:+*+++*+++==--------::::++=====+++++++================---:::: + =%%%#*-.==****++++=+=+++=*#*==+=====----======---:-:==-=====++++++=========--=======---. + . 
-#%%#+-:-+*#####**++++=--=-========++++++++++==-::::*========+++=====--:::::-=======-. + :.. .*##*+::=+###%%%####****:.-+*********++++++++==-:::=+-============---::.::--======- + .-.:-=***#*=::-+##%%%######*#=:=-+*************++++==-:::+=-==========-=+====-:--===+=. + .- .::::-=**+::=*###%#######**==--*************++++===-:--+--==========+-:--=:.:-===- + :. ::.. :+*+:.-*#########****==:-****+*****+++++=====--:-+---=----====--::.:..-=-. + -. .... .=*-.:=*#######*---:...::...:+*++++++++=====-::-+--=====--====-.::..:. + :. .:. .-=:.:+#######*=-:: . .:+++++++++======--:-+=-==========::-..: + -. .:. .:..:=+*##***=--: .. .:=+++++++++======---==-=========:+:..: + :. ..:. .:-=****+=--: :====+++++++======-=----===-::+*..:. + -. . :-=+++==-:....:..:=+=+++++++++==--:. :#-..: + :::::-=:::. ..::---::. .:-----::.. .=#:... + :-.=--***###**=-:. ..::-====-::==.:: + :... ....:=+*###*=--:::...... .. . ... . .....::--=+++++++++***=::=::=- + :----======---------::.::.:.....::::::.:......-+++++==--::.:::::...:.:. + ...::::::-::--:::.::::::::::.............::.. + .. .... .. . + diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 00000000..662d08b6 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"","tags":[]},{"location":"#redbrick-docs","title":"Redbrick Docs","text":"

Welcome to Redbrick's documentation. This is to keep up to date information about the technical infrastructure of Redbrick.

This is mostly intended for admins, future admins, webmasters, and everybody else who is grumpy and has no life.

","tags":[]},{"location":"#quick-links","title":"Quick Links","text":"
  • Redbrick Website
  • SSH help
  • Aperture - Redbrick's new fleet of hardware
","tags":[]},{"location":"#webgroup","title":"Webgroup","text":"","tags":[]},{"location":"#new-admins","title":"New Admins","text":"

So, you want to become an admin. Brave of you. Here's some stuff you should probably read:

  • Becoming an admin
  • Admin Cheatsheet
  • Redbrick System Administrator Policies
  • Abuse at Redbrick, and the committee's stance on it
","tags":[]},{"location":"contact/","title":"Contact Us","text":"

If you have any questions or comments, please contact us at elected-admins@redbrick.dcu.ie. Or you can join the Discord server!

","tags":[]},{"location":"hardware/","title":"Hardware","text":"

Here is a list of current hardware in Redbrick's suite of servers, switches and other bits.

","tags":[]},{"location":"hardware/#login-boxes","title":"Login Boxes","text":"
  • azazel
  • pygmalion
","tags":[]},{"location":"hardware/#nixos-boxes","title":"NixOS Boxes","text":"
  • hardcase
  • motherlode
  • icarus

  • paphos

  • zeus
","tags":[]},{"location":"hardware/#aperture","title":"Aperture","text":"
  • glados
  • wheatley
  • chell
  • johnson
  • bastion-vm
","tags":[]},{"location":"hardware/#network-hardware","title":"Network Hardware","text":"
  • arse
  • cerberus
  • mordor
  • switches
","tags":[]},{"location":"hardware/azazel/","title":"Azazel","text":"","tags":["login-box","hardware","azazel","details","debian"]},{"location":"hardware/azazel/#details","title":"Details","text":"
  • Type: Dell PowerEdge R515
  • OS: Debian 12 bookworm
  • CPU: 2 x AMD Opteron 4180 @ 2.6Ghz
  • RAM: 16GB
  • Storage: Dell PERC H200 Integrated RAID Controller
  • Disks: 2 x 146GB 15,000 RPM SAS in RAID 1
  • DAS: Worf
  • Drives: Internal SATA DVD\u00b1RW
  • Network: 2x Onboard Ethernet
","tags":["login-box","hardware","azazel","details","debian"]},{"location":"hardware/azazel/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 10.5.0.1
  • External:
    • 136.206.15.24
","tags":["login-box","hardware","azazel","details","debian"]},{"location":"hardware/azazel/#services","title":"Services","text":"
  • primary ssh login box for users (see Logging in)
  • jump-box for admins
","tags":["login-box","hardware","azazel","details","debian"]},{"location":"hardware/paphos/","title":"Paphos","text":"","tags":["hardware","paphos","details","dns","bind","ubuntu"]},{"location":"hardware/paphos/#details","title":"Details","text":"
  • Type: Dell PowerEdge R710
  • OS: Ubuntu 14.04.5 LTS \ud83d\ude2d
  • CPU: 2 x Intel Xeon CPU E5620 @ 2.40Ghz
  • RAM: 16GB
  • Drives: Internal SATA DVD\u00b1RW
  • Network: NetXtreme II BCM5709 Gigabit Ethernet
","tags":["hardware","paphos","details","dns","bind","ubuntu"]},{"location":"hardware/paphos/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 192.168.0.26
    • 192.168.0.4
    • 10.5.0.6
  • External:
    • 136.206.15.26
    • 136.206.15.52
    • 136.206.15.53
    • 136.206.15.55
    • 136.206.15.57
    • 136.206.15.58
    • 136.206.15.101
    • 136.206.15.54
    • 136.206.15.74
    • ns1.redbrick.dcu.ie
","tags":["hardware","paphos","details","dns","bind","ubuntu"]},{"location":"hardware/paphos/#services","title":"Services","text":"
  • DNS (bind)
","tags":["hardware","paphos","details","dns","bind","ubuntu"]},{"location":"hardware/pygmalion/","title":"Pygmalion","text":"","tags":["login-box","hardware","pygmalion","ubuntu"]},{"location":"hardware/pygmalion/#details","title":"Details","text":"
  • Type: Intel(R) Xeon (R)
  • OS: Debian 12 bookworm
  • CPU: 2x Intel (R) Xeon (R) E5620 2.46GHz
  • RAM: 16GB
  • Network: 4x Broadcom Corporation NetXtreme II BCM5709 Gigabit Ethernet
","tags":["login-box","hardware","pygmalion","ubuntu"]},{"location":"hardware/pygmalion/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 192.168.0.25
  • External:
    • 136.206.15.25
    • pygmalion.redbrick.dcu.ie
    • pyg.redbrick.dcu.ie
","tags":["login-box","hardware","pygmalion","ubuntu"]},{"location":"hardware/pygmalion/#services","title":"Services","text":"
  • secondary ssh login box for users (see Logging in)
  • jump-box for admins
","tags":["login-box","hardware","pygmalion","ubuntu"]},{"location":"hardware/zeus/","title":"Zeus","text":"","tags":["hardware","zeus","details","docker","ubuntu"]},{"location":"hardware/zeus/#details","title":"Details","text":"
  • Type: Dell PowerEdge R410
  • OS: Ubuntu 18.04
  • CPU: 2x Intel(R) Xeon (R) x5570 @ 2.93 GHz
  • RAM: 32GB
  • Network: 2x NetXtreme II BCM5716 Gigabit Ethernet
","tags":["hardware","zeus","details","docker","ubuntu"]},{"location":"hardware/zeus/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 192.168.0.131
  • External:
    • 136.206.15.31
","tags":["hardware","zeus","details","docker","ubuntu"]},{"location":"hardware/zeus/#services","title":"Services","text":"
  • Wetty at: wetty.redbrick.dcu.ie
  • Admin API at: api.redbrick.dcu.ie
  • brickbot2
  • Secretary's email generator at: generator.redbrick.dcu.ie
  • CodiMD at: md.redbrick.dcu.ie
  • all of this is routed through traefik as a reverse proxy
","tags":["hardware","zeus","details","docker","ubuntu"]},{"location":"hardware/aperture/","title":"Aperture","text":"","tags":["aperture","details","getting-started"]},{"location":"hardware/aperture/#what-is-aperture","title":"What is Aperture?","text":"

It's nothing to do with cameras. See about for more information on the hardware.

","tags":["aperture","details","getting-started"]},{"location":"hardware/aperture/#new-admins","title":"New Admins","text":"

If you're a new admin, this is a cheat sheet for you. In order to get broadly up to speed and understand the content of these pages, I suggest you read the following:

  • About
  • Nomad docs, specifically the job specification and managing nomad jobs pages.
  • Consul docs, specifically how it can be used with Nomad.
  • Ansible docs, specifically the playbook
","tags":["aperture","details","getting-started"]},{"location":"hardware/aperture/#faq","title":"FAQ","text":"

So, you've hit a problem. Here are some quick links to some common problems:

  • I can't connect to Aperture
  • Ansible is running into an error
","tags":["aperture","details","getting-started"]},{"location":"hardware/aperture/about/","title":"About Aperture","text":"

Aperture is Redbrick's fleet of hardware that was installed in May 2022 by distro, pints, skins, cawnj, ymacomp and arkues.

It consists of:

  • 3x Dell R6515 - glados, wheatley, chell
| CPU | RAM | Storage |
| --- | --- | --- |
| AMD 7302P 3GHz, 16C/32T, 128M, 155W, 3200 | 2x 16GB RDIMM, 3200MT/s Dual Rank | 4x 2TB SATA HDDs (hardware RAID) |
  • 2x Ubiquiti USW Pro - rivendell, isengard
  • 1x Ubiquiti UDM Pro - mordor
","tags":["aperture","hardware"]},{"location":"hardware/aperture/about/#servers","title":"Servers","text":"

The three servers are named glados, wheatley and chell.

","tags":["aperture","hardware"]},{"location":"hardware/aperture/about/#networks","title":"Networks","text":"

The firewall is called mordor, and the two 24-port switches are called rivendell and isengard.

","tags":["aperture","hardware"]},{"location":"hardware/aperture/about/#networking","title":"Networking","text":"

The IP address range for the aperture subnet is 10.10.0.0/24, with 10.10.0.0/16 being used for user VMs.

| Hostname | Internal Address | External Address | Purpose |
| --- | --- | --- | --- |
| mordor | 10.10.0.1 | N/A | Firewall |
| rivendell | 10.10.0.2 | N/A | Switch |
| isengard | 10.10.0.3 | N/A | Switch |
| glados | 10.10.0.4 | 136.206.16.4 | Server |
| wheatley | 10.10.0.5 | 136.206.16.5 | Server |
| chell | 10.10.0.6 | 136.206.16.6 | Server |

Note!

Blue cables are used for production network.

","tags":["aperture","hardware"]},{"location":"hardware/aperture/about/#kvm","title":"KVM","text":"

nexus is the name of the KVM switch. Its internal IP address is 10.10.0.10.

glados is connected on port 1, wheatley on port 2, and chell on port 3.

Note!

Yellow cables are used for KVM network.

","tags":["aperture","hardware"]},{"location":"hardware/aperture/about/#idrac","title":"IDRAC","text":"

The new servers are all equipped with IDRACs. These still need to be configured.

Note!

Red cables are used for IDRAC network.

","tags":["aperture","hardware"]},{"location":"hardware/aperture/about/#images-click-me","title":"Images (click me)","text":"","tags":["aperture","hardware"]},{"location":"hardware/aperture/about/#switching-from-the-old-network-to-the-new","title":"Switching from the Old Network to the New","text":"

We have two address ranges that come in on a single redundant link. We're exchanging that redundant link for two separate links, each taking responsibility for one address range (136.206.15.0/24 and 136.206.16.0/24). We're surrendering redundancy only to keep uptime/connectivity during the switchover. Once the new servers are production ready, we can recombine the links to regain redundancy.

","tags":["aperture","hardware"]},{"location":"hardware/aperture/chell/","title":"Chell","text":"","tags":["hardware","aperture","chell","details"]},{"location":"hardware/aperture/chell/#details","title":"Details","text":"
  • Type: Dell R6515
  • OS: Debian 11
  • CPU: AMD 7302P 3GHz, 16C/32T, 128M, 155W
  • RAM: 2x 16GB RDIMM, 3200MT/s Dual Rank
  • Storage: 4x 2TB SATA HDDs (hardware RAID)

Part of aperture

","tags":["hardware","aperture","chell","details"]},{"location":"hardware/aperture/chell/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 10.10.0.6
  • External:
    • 136.206.16.6
    • chell.redbrick.dcu.ie
    • chell.aperture.redbrick.dcu.ie
","tags":["hardware","aperture","chell","details"]},{"location":"hardware/aperture/glados/","title":"GlaDOS","text":"","tags":["hardware","aperture","glados","details"]},{"location":"hardware/aperture/glados/#details","title":"Details","text":"
  • Type: Dell R6515
  • OS: Debian 11
  • CPU: AMD 7302P 3GHz, 16C/32T, 128M, 155W
  • RAM: 2x 16GB RDIMM, 3200MT/s Dual Rank
  • Storage: 4x 2TB SATA HDDs (hardware RAID)

Part of aperture

","tags":["hardware","aperture","glados","details"]},{"location":"hardware/aperture/glados/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 10.10.0.4
  • External:
    • 136.206.16.4
    • glados.redbrick.dcu.ie
    • glados.aperture.redbrick.dcu.ie
","tags":["hardware","aperture","glados","details"]},{"location":"hardware/aperture/images/","title":"Aperture Images","text":"","tags":["hardware","aperture","install","images"]},{"location":"hardware/aperture/images/#servers","title":"Servers","text":"","tags":["hardware","aperture","install","images"]},{"location":"hardware/aperture/images/#networking","title":"Networking","text":"","tags":["hardware","aperture","install","images"]},{"location":"hardware/aperture/images/#some-dancing-for-good-measure","title":"Some Dancing for Good Measure","text":"

dancing.mp4

","tags":["hardware","aperture","install","images"]},{"location":"hardware/aperture/johnson/","title":"Johnson","text":"","tags":["aperture","hardware","johnson","details"]},{"location":"hardware/aperture/johnson/#details","title":"Details","text":"

Formerly albus (in a different life)

  • Type: Dell PowerEdge R515
  • OS: NixOS
  • CPU: 2 x Opteron 4334 6 core @ 3.2GHz
  • RAM: 32GB
  • Storage: LSI MegaRAID SAS 2108 RAID controller
  • Disks: 2 x 300GB SAS for boot, 8 x 1TB SATA ZFS
  • Drives: Internal SATA DVD\u00b1RW
  • Network: 4x Onboard Ethernet, 802.3ad bonding
  • iDRAC NIC: Shared on port 1

Part of aperture

","tags":["aperture","hardware","johnson","details"]},{"location":"hardware/aperture/johnson/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 10.10.0.7
  • The 2nd NIC is currently unused; it would be a good idea to bond it with the first for more throughput and redundancy on the same IP
","tags":["aperture","hardware","johnson","details"]},{"location":"hardware/aperture/johnson/#services","title":"Services","text":"
  • NFS for aperture
","tags":["aperture","hardware","johnson","details"]},{"location":"hardware/aperture/wheatley/","title":"Wheatley","text":"","tags":["aperture","hardware","wheatley","details"]},{"location":"hardware/aperture/wheatley/#details","title":"Details","text":"
  • Type: Dell R6515
  • OS: Debian 11
  • CPU: AMD 7302P 3GHz, 16C/32T, 128M, 155W
  • RAM: 2x 16GB RDIMM, 3200MT/s Dual Rank
  • Storage: 4x 2TB SATA HDDs (hardware RAID)

Part of aperture

","tags":["aperture","hardware","wheatley","details"]},{"location":"hardware/aperture/wheatley/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 10.10.0.5
  • External:
    • 136.206.16.5
    • wheatley.redbrick.dcu.ie
    • wheatley.aperture.redbrick.dcu.ie
","tags":["aperture","hardware","wheatley","details"]},{"location":"hardware/network/","title":"Redbrick Network Architecture","text":"","tags":[]},{"location":"hardware/network/mordor/","title":"Mordor","text":"","tags":[]},{"location":"hardware/network/mordor/#setup","title":"Setup","text":"

The firewall is set up using the personal setup type, using the elected-admins@redbrick.dcu.ie account (stored in pwsafe).

2FA is stored on the same device as the Github 2FA code.

","tags":[]},{"location":"hardware/network/mordor/#automatic-updates","title":"Automatic Updates","text":"

The UDM Pro is not set up for automatic updates for reliability reasons.

","tags":[]},{"location":"hardware/network/mordor/#network-speeds","title":"Network Speeds","text":"

We have a 10Gb/s link to DCU's core.

","tags":[]},{"location":"hardware/network/mordor/#users","title":"Users","text":"

The current elected admins should all have access to the rbadmin account on the firewall. Rootholders should not have access to the firewall unless they are explicitly granted access.

The owner account of the unifi equipment is rbadmins (email: elected-admins@redbrick.dcu.ie) with the password stored in pwsafe under unifi.

There is a \"super admin\" account that can be used for local access only, details are stored in pwsafe under udmpro-super-admin.

","tags":[]},{"location":"hardware/network/mordor/#updates","title":"Updates","text":"

The UDM Pro should be kept up to date at all times using the web interface. Please ensure there are no breaking changes before updating.

AUTO UPDATES SHOULD NEVER BE ENABLED!

This is to prevent a bad update from breaking the UDM Pro and thus the entire network. If you are confident that Unifi can produce stable updates, you may turn it on; however, please let the next admins know that you have done this (and update these docs with a comment!).

","tags":[]},{"location":"hardware/network/mordor/#advanced-settings","title":"Advanced Settings","text":"

SSH is enabled to allow for rollbacks in case of a bad update (I warned you!).

Remote access is disabled as it should not be needed, the admin VPN should provide enough access for you. If it is enabled in future, please update these docs with your reasons.

","tags":[]},{"location":"hardware/network/mordor/#backups","title":"Backups","text":"

Backups are configured to run every week at 1am on a Sunday. 20 backups are stored at a time, giving 20 weeks of configuration history. This should be plenty of time to recover from a bad configuration change.

","tags":[]},{"location":"hardware/network/mordor/#external-addresses","title":"External Addresses","text":"

Mordor is NATted when it accesses the Internet. This is because the link address between it and DCU is on a private address.

This NATting is used only for the UDM pro device itself, not for the 136.206.16.0/24 network, and is to allow the UDM box itself to access the Internet.

The 136.206.16.0/24 network is routed down to the UDM pro box, within the DCU network. Essentially there is a route in DCU's network that says \"if you want to access 136.206.16.0/24 go to mordor\".

","tags":[]},{"location":"hardware/nix/hardcase/","title":"Hardcase","text":"","tags":["nixos","hardware","details","hardcase"]},{"location":"hardware/nix/hardcase/#details","title":"Details","text":"
  • Type: Dell PowerEdge R410
  • OS: NixOS
  • CPU: 2 x Intel Xeon X5570 @ 2.93GHz
  • RAM: 48GB, incorrectly populated
  • Storage: LSI Logic SAS1068E \"Fake\" RAID controller
  • Disks: 2 x 500GB SATA disks in RAID 1
  • Drives: Internal SATA DVD\u00b1RW
  • Network: 2x Onboard Ethernet, 802.3ad bonding
  • iDRAC NIC: Shared on port 1
  • iDRAC IP is 1.158
","tags":["nixos","hardware","details","hardcase"]},{"location":"hardware/nix/hardcase/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 192.168.0.158
  • External:
    • 136.206.15.3
","tags":["nixos","hardware","details","hardcase"]},{"location":"hardware/nix/hardcase/#services","title":"Services","text":"
  • postgreSQL
  • apache
  • monitoring
  • postfix (SMTP)
  • dovecot (IMAP)
  • mailman - mailing lists
","tags":["nixos","hardware","details","hardcase"]},{"location":"hardware/nix/icarus/","title":"Icarus","text":"

Daedalus and Icarus were twins and thus share documentation.

However, Daedalus is now Deadalus, and Icarus lives on for now, albeit a little sick.

","tags":["nixos","hardware","icarus","daedalus","details"]},{"location":"hardware/nix/icarus/#details","title":"Details","text":"
  • Type: Dell PowerEdge 2950
  • OS: NixOS
  • CPU: 2x Intel Xeon L5335 @ 2.00GHz
  • RAM: 32GB (Daedalus), 16GB (Icarus)
  • Storage: Dell Perc 6/i Integrated RAID controller
  • Disks:
    • 2 x 73GB SAS disks in RAID 1 (hardware)
    • 3 x 600GB SAS disks in passthrough (3x RAID 0)
  • Drives: Internal SATA DVD\u00b1RW
  • Network: 2x Onboard Ethernet, 802.3ad bonding
  • iDRAC NIC: Shared on port 1
","tags":["nixos","hardware","icarus","daedalus","details"]},{"location":"hardware/nix/icarus/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 192.168.0.150
","tags":["nixos","hardware","icarus","daedalus","details"]},{"location":"hardware/nix/icarus/#services","title":"Services","text":"
  • LDAP
  • NFS (a.k.a. /storage)
  • GlusterFS, eventually, or some other distributed storage to replace NFS
","tags":["nixos","hardware","icarus","daedalus","details"]},{"location":"hardware/nix/motherlode/","title":"Motherlode","text":"","tags":["nixos","hardware","motherlode","details","qemu","libvirt"]},{"location":"hardware/nix/motherlode/#details","title":"Details","text":"

(Something should go here probably)

","tags":["nixos","hardware","motherlode","details","qemu","libvirt"]},{"location":"hardware/nix/motherlode/#where-to-find","title":"Where to Find","text":"
  • Internal:
    • 192.168.0.130
  • External:
    • 136.206.15.250 (dcuclubsandsocs.ie)
","tags":["nixos","hardware","motherlode","details","qemu","libvirt"]},{"location":"hardware/nix/motherlode/#services","title":"Services","text":"
  • hosts the VM for dcuclubsandsocs.ie (libvirt/QEMU)
","tags":["nixos","hardware","motherlode","details","qemu","libvirt"]},{"location":"procedures/","title":"Procedures","text":"

Here you can find a list of various procedures useful for the day-to-day running of Redbrick

","tags":[]},{"location":"procedures/#new-elected-admins","title":"New elected admins","text":"","tags":[]},{"location":"procedures/#cheatsheet","title":"Cheatsheet","text":"","tags":[]},{"location":"procedures/#admin-vpn","title":"Admin VPN","text":"","tags":[]},{"location":"procedures/#ansible","title":"Ansible","text":"","tags":[]},{"location":"procedures/#post-powercut-todo-list","title":"Post-powercut Todo List","text":"","tags":[]},{"location":"procedures/#nixos","title":"NixOS","text":"","tags":[]},{"location":"procedures/#updating-wordpress-domains","title":"Updating WordPress Domains","text":"","tags":[]},{"location":"procedures/#irc-ops","title":"IRC Ops","text":"","tags":[]},{"location":"procedures/#committee-handover","title":"Committee Handover","text":"","tags":[]},{"location":"procedures/#redbrick-system-administrator-policies","title":"Redbrick System Administrator Policies","text":"","tags":[]},{"location":"procedures/ansible/","title":"Ansible","text":"

Redbrick uses ansible to manage its infrastructure. This document describes the procedures and some tips to get the most out of it.

","tags":[]},{"location":"procedures/ansible/#getting-started","title":"Getting Started","text":"","tags":[]},{"location":"procedures/ansible/#installing-ansible","title":"Installing Ansible","text":"

Ansible is a Python package, so you'll need Python and pip installed first (on Debian/Ubuntu these come from the python3 and python3-pip packages). You can then install Ansible with:

Bash
pip install ansible\n
","tags":[]},{"location":"procedures/ansible/#add-an-ssh-key","title":"Add an SSH Key","text":"

Ansible uses ssh to connect to the remote hosts. You'll need to set up your ssh key so that you can connect to the hosts without constant prompts for passwords.
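For example, a minimal sketch of generating a key and copying it to the aperture hosts (the key type and hostname here are illustrative, adjust to your own setup):

Bash
ssh-keygen -t ed25519 # generate a key pair if you don't have one\nssh-copy-id <your username>@glados.redbrick.dcu.ie # repeat for wheatley and chell\n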

","tags":[]},{"location":"procedures/ansible/#create-a-hosts-file","title":"Create a Hosts File","text":"

This is used as a phonebook of sorts for ansible. It tells ansible which hosts to connect to, and what user to use.

INI
[aperture]\nglados\nwheatley\nchell\n\n[aperture:vars]\nansible_user= <your username>\n

Contact @distro for a fully populated file.

","tags":[]},{"location":"procedures/ansible/#test-it-out","title":"Test it out","text":"Bash
ansible all -m ping\n

This should connect to all the hosts in the aperture group, and run the ping module. If it works, you're good to go!

","tags":[]},{"location":"procedures/ansible/#playbooks","title":"Playbooks","text":"

Ansible playbooks are a set of instructions for ansible to run. They're written in YAML, and are usually stored in a file called playbook.yml.

","tags":[]},{"location":"procedures/ansible/#writing-a-playbook","title":"Writing a Playbook","text":"

Ansible playbooks are written in YAML. The basic structure is:

YAML
- hosts: <group name>\n  tasks:\n    - name: <task name>\n      <module name>:\n        <module options>\n
","tags":[]},{"location":"procedures/ansible/#example","title":"Example","text":"YAML
- hosts: aperture\n  tasks:\n    - name: Install curl\n      apt:\n        name: curl\n        state: present\n

This playbook will connect to all the hosts in the aperture group, and run the apt module with the name and state options.

","tags":[]},{"location":"procedures/ansible/#running-a-playbook","title":"Running a Playbook","text":"Bash
ansible-playbook playbook.yml -i hosts\n
","tags":[]},{"location":"procedures/ansible/#more-information","title":"More Information","text":"

Redbrick's ansible configuration is stored in the ansible folder in the redbrick/nomad repository. There's some more documentation there on each playbook.
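As a rough sketch of running one of those playbooks (the repository URL is an assumption based on the repo name above; the playbook path is the one referenced in the bastion-vm docs):

Bash
git clone https://github.com/redbrick/nomad.git\ncd nomad/ansible\nansible-playbook redbrick-ansible.yml -i hosts\n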

Ansible's documentation is available here.

","tags":[]},{"location":"procedures/ansible/#common-errors","title":"Common Errors","text":"","tags":[]},{"location":"procedures/ansible/#hashicorp-apt-key","title":"Hashicorp Apt Key","text":"

Sometimes, when running a playbook, you'll get an error like this:

Bash
TASK [apt : apt update packages to their latest version and autoclean] ***************************************************************************************************\nfatal: [wheatley]: FAILED! => {\"changed\": false, \"msg\": \"Failed to update apt cache: unknown reason\"}\nfatal: [chell]: FAILED! => {\"changed\": false, \"msg\": \"Failed to update apt cache: unknown reason\"}\nfatal: [glados]: FAILED! => {\"changed\": false, \"msg\": \"Failed to update apt cache: unknown reason\"}\n

This is because the Hashicorp apt key has expired. To fix this, uncomment the hashicorp-apt task in the playbook.

","tags":[]},{"location":"procedures/cheatsheet/","title":"Cheatsheet","text":"","tags":[]},{"location":"procedures/cheatsheet/#ldap","title":"LDAP","text":"
  • Query a user
Bash
ldapsearch -x uid=\"USERNAME_HERE\"\n
  • Query user as root for more detailed info
Bash
ldapsearch -D \"cn=root,ou=services,o=redbrick\" -y /etc/ldap.secret uid=user\n
  • Find all users emails created by USERNAME
Bash
ldapsearch -x createdby=\"user\" uid | awk '/uid:/ {print $2\"@redbrick.dcu.ie\"}'\n
  • Check if something is backed up on NFS (/storage/path/to/file)

All useful LDAP scripts (edit user quota, reset user password, renew user accounts, etc) are located in the home directory of root on Azazel.

Log in as root on a server with local accounts:

Bash
ssh localaccount@redbrick.dcu.ie\nsudo -i # (same password as localaccount account)\n
","tags":[]},{"location":"procedures/cheatsheet/#authenticationpasswords","title":"Authentication/Passwords","text":"","tags":[]},{"location":"procedures/cheatsheet/#onboarding-new-admins","title":"Onboarding New Admins","text":"
  • Create a root SSH key for the NixOS machines. After creating the key, add it to the whitelist in the nix configs.
Bash
ssh-keygen -t ed25519 # Generate key\ncat ~/.ssh/id_ed25519.pub # Verify it's been created\nssh-copy-id -i ~/.ssh/id_ed25519 user@redbrick.dcu.ie # Copy to local account's ssh dir\nssh -i ~/.ssh/id_ed25519 user@redbrick.dcu.ie # Verify that this key was copied\n
","tags":[]},{"location":"procedures/cheatsheet/#access-passwordsafe-pwsafe","title":"Access Passwordsafe (pwsafe)","text":"

Location of master password vault.

Note:

getpw will prompt you for the Master root password.

Bash
ssh localroot@halfpint\nsudo -i # to log in as root with local user password\npwsafe # to list passwords\ngetpw <name_of_pass> # Grab password by name, e.g. getpw pygmalion\n
","tags":[]},{"location":"procedures/cheatsheet/#ssh-to-root-on-a-nixos-machine","title":"SSH to Root on a NixOS Machine","text":"
  • From the account you generated your ssh key on (in nix configs) type:
Bash
ssh root@hardcase.internal\n
","tags":[]},{"location":"procedures/cheatsheet/#nixos","title":"NixOS","text":"
  • Install a temporary program
Bash
nix-shell -p [space separated package names]\n
  • Run brickbot2 (running on Metharme)
Bash
cd brickbot2\nnix-shell\nsource venv/bin/activate\npython3 main.py config.toml\n

Brickbot runs in a tmux session (attach with tmux a -t 0) and can be restarted by pressing ctrl+c and re-running the above python command
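For example, the full restart sequence looks something like this:

Bash
tmux a -t 0 # attach to the session brickbot runs in\n# press ctrl+c to stop the bot, then:\npython3 main.py config.toml\n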

","tags":[]},{"location":"procedures/cheatsheet/#minecraft-servers","title":"Minecraft Servers","text":"

The Redbrick Minecraft servers are dockerized applications running on zeus on a server-per-container basis, using the tools from this GitHub repo: https://github.com/itzg/docker-minecraft-server#interacting-with-the-server

The repo is very well documented, so have a look at the README, but here are the basics:

NOTE: Local Root accounts must be added to the docker group before they can run the docker commands. usermod -a -G docker ACCOUNT_NAME

You can run docker ps | grep minec to find the docker containers running the servers.

The docker compose files are located in /etc/docker-compose/services, Unmodded Vanilla compose for example is in /etc/docker-compose/services/minecraft_unmodded/

To see the configuration for the container you can do docker inspect CONTAINER_NAME_OR_ID

  • Interacting with the Server Console
    • https://github.com/itzg/docker-minecraft-server#interacting-with-the-server
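As a quick sketch of the commands above (the restart step is an assumption, not from these docs; container and directory names are examples):

Bash
docker ps | grep minec # find the containers running the servers\ndocker inspect CONTAINER_NAME_OR_ID # view a container's configuration\ncd /etc/docker-compose/services/minecraft_unmodded/ # compose files for the unmodded server\ndocker compose restart # restart that server if needed (assumption)\n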
","tags":[]},{"location":"procedures/handover/","title":"Committee Handover","text":"

When a new committee is elected, there are many things to hand over. This is a list of those things.

","tags":[]},{"location":"procedures/handover/#passwords","title":"Passwords","text":"

All passwords should be rotated as soon as possible, so that the old committee can no longer access Redbrick using the old passwords. The passwords are stored in Bitwarden, and the master password should be rotated first and foremost.

","tags":[]},{"location":"procedures/handover/#2-factor-authentication","title":"2-Factor Authentication","text":"

The Chair holds the 2FA key for the Bitwarden account.

","tags":[]},{"location":"procedures/irc-ops/","title":"IRC Ops","text":"

This is a mirror of:

Redbrick cmt Wiki entry

","tags":[]},{"location":"procedures/irc-ops/#channel-modes","title":"Channel Modes","text":"

It's easy to bugger up the channel with the MODE command, so here's a nice copied and pasted summary of how to use it:

  • /mode {channel} +b {nick|address} - ban somebody by nickname or address mask (nick!account@host)
  • /mode {channel} +i - channel is invite-only
  • /mode {channel} +l {number} - channel is limited to a maximum of {number} users
  • /mode {channel} +m - channel is moderated, only chanops and others with 'voice' can talk
  • /mode {channel} +n - external /MSGs to channel are not allowed
  • /mode {channel} +p - channel is private
  • /mode {channel} +s - channel is secret
  • /mode {channel} +t - topic is limited, only chanops may change it
  • /mode {channel} +o {nick} - makes {nick} a channel operator
  • /mode {channel} +v {nick} - gives {nick} a voice
","tags":[]},{"location":"procedures/irc-ops/#other-commands","title":"Other Commands","text":"

Basically what you'll be using is:

  • To kick someone: /kick username
  • To ban someone: /mode #lobby +b username
  • To set the topic: /topic #lobby whatever
  • To op someone: /mode #lobby +o someone
  • To op two people: /mode #lobby +oo someone someone_else

Or:

  • To kick someone: /k username
  • To ban someone: /ban username
  • To unban someone: /unban username
  • To set the topic: /t whatever
  • To op someone: /op someone
  • To op two people: /op someone someone_else
  • To deop someone: /deop someone
","tags":[]},{"location":"procedures/irc-ops/#sysop-specific-commands","title":"Sysop Specific Commands","text":"

These commands can only be run by sysops (i.e. admins in the ircd config file).

  • Enter BOFH mode (required for all sysop commands): /oper
  • Peer to another server*: /sconnect <node name>
  • Drop a peer with another server: /squit <node name>
  • Force op yourself (do not abuse): /quote opme <channel name>
  • Barge into a channel uninvited (again, do not abuse):/quote ojoin #channel
  • Barge into a channel uninvited with ops (same again): /quote ojoin @#channel
  • Force someone to join a channel: /quote forcejoin nick #channel
  • Kill someone: /kill <username> <smartassed kill message>
  • Ban someone from this server: /kline <username> (there may be more params on this)
  • Ban someone from the entire network: /gline <username> (there may be more params on this)

(thanks to atlas for the quick overview)

  • Don't try to connect to intersocs. Due to crazy endian issues or something, they have to connect to us.
","tags":[]},{"location":"procedures/irc-ops/#bots","title":"Bots","text":"

So many bots are now 'littering' #lobby that it has become a slight problem; anyone wishing to add a new bot to the channel must request permission from the Committee. The main feature wanted is a time limit on bot commands.

","tags":[]},{"location":"procedures/irc-ops/#services","title":"Services","text":"

The IRC services are run by Trinity for all the netsocs. The two services are

NickServ and ChanServ.

  • /msg NickServ HELP
  • /msg ChanServ HELP

for more details.

","tags":[]},{"location":"procedures/new-admins/","title":"New Elected Admins","text":"

The chronological process of becoming an admin usually looks very similar each year. There are some important things you should know.

Remember, being a SysAdmin for the society is not a job; it is a voluntary task you sign up to. Don't stress yourself out over it, have fun, and hopefully learn a thing or two. : )

","tags":[]},{"location":"procedures/new-admins/#process","title":"Process","text":"","tags":[]},{"location":"procedures/new-admins/#admin-exam","title":"Admin Exam","text":"

Anyone wishing to run and be elected as a SysAdmin must complete a technical exam assessing their knowledge and competency in solving some of the many problems that will be thrown at them.

You can find some archives of past exams here; however, note that these vary year to year as they are created each year by the currently elected admins.

","tags":[]},{"location":"procedures/new-admins/#election-at-agm","title":"Election at AGM","text":"

At the annual general meeting, you may nominate yourself, or have someone nominate you to run for SysAdmin. You may only run if you have passed the Admin exam.

The number of admins per year is usually three; to be elected, you must be among the top three candidates by vote.

","tags":[]},{"location":"procedures/new-admins/#onboarding","title":"Onboarding","text":"

If you are successfully elected - congrats! We welcome you to this pain joy filled journey :)

After being elected it is your time to learn the ropes and become familiar with the technicalities of Redbrick.

Not alone of course! The previous Admins will assist you on this journey and be there to answer any of your questions, along with this documentation.

","tags":[]},{"location":"procedures/nixos/","title":"NixOS","text":"

Familiarise yourself with the layout of the following. Bookmarking the page is also a good shout.

NixOS documentation

","tags":[]},{"location":"procedures/nixos/#who-is-nixos-and-what-does-he-do","title":"Who is NixOS and what Does He Do","text":"

NixOS is a Linux distribution focused on a config-first approach to running services. The advantages of such an approach are the following:

  • Files dictate how an installation is set up, and as such, can be versioned and tracked in your favourite VCS.
  • New configs can be tested, and safely rolled back.
  • Can be used for both physical and virtual machines in the same way.

Further reading on this can be found on the about page.

","tags":[]},{"location":"procedures/nixos/#being-an-admin-nixos-and-you","title":"Being an Admin: NixOS and You","text":"

There's a couple of things you'll need to do before you get started with NixOS:

  • First and foremost is to get set up to contribute to the Redbrick nix-configs repo.

Depending on the powers that be, some sort of normal PR contribution will be acceptable: if you have access, a branch is appropriate; in all other cases, make a fork and PR back to Redbrick's repo. This will be decided case by case for those of you reading.

Here's a quick hit list of stuff that's also worth bookmarking as you work with Nix:

  • NixOS Wiki
  • NixOS Manual
  • Nixpkgs index (unstable means changing, not buggy)
  • Grafana config options (as an example of how to configure an individual service)

Nix is pretty small as an OS, so setting up a node of your own, either as a home server or as a VM, is a solid way to practice how stuff works in an actual environment and lets you work independently of Redbrick. A service you configure at home should be able to run on Redbrick, and vice versa.

","tags":[]},{"location":"procedures/nixos/#getting-set-up-to-start-deploying-stuff","title":"Getting Set up to Start Deploying Stuff","text":"
  • The first step is to navigate to the ssh service config in the nix-config repo here.

  • Make a pull request asking to add the PUBLIC KEY of your ssh key pair to the config file.

    • The best thing to do is to copy the previous line and modify it to contain your details instead.
    • At the time of writing, you are expected to generate an ssh-ed25519 key. This is subject to change with new cryptographic standards.
  • Once this is done, contact one of the currently set up users to pull and reload the given machines and you'll have access right away using the accompanying key.
","tags":[]},{"location":"procedures/open-governance-tagging/","title":"Open Governance Tagging - hypnoant, wizzdom","text":"","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#1-before-the-tagging-ceremony","title":"1. Before the Tagging Ceremony","text":"","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#generating-the-key","title":"Generating the Key","text":"

To tag the Open Governance repo you will need to make a new PGP key on behalf of the Redbrick committee. Below are the commands and inputs for creating this key.

Bash
gpg --full-generate-key\n
Key Generation Menu
Please select what kind of key you want:\n   (1) RSA and RSA\n   (2) DSA and Elgamal\n   (3) DSA (sign only)\n   (4) RSA (sign only)\n   (9) ECC (sign and encrypt) *default*\n  (10) ECC (sign only)\n  (14) Existing key from card\nYour selection? 1\n\nRSA keys may be between 1024 and 4096 bits long.\nWhat keysize do you want? (3072) 4096\n\nPlease specify how long the key should be valid.\n         0 = key does not expire\n      <n>  = key expires in n days\n      <n>w = key expires in n weeks\n      <n>m = key expires in n months\n      <n>y = key expires in n years\nKey is valid for? (0) {SET FOR DATE AFTER TAGGING CEREMONY}\n\nKey expires at {DATE AFTER TAGGING CEREMONY} IST\nIs this correct? (y/N) y\n\nGnuPG needs to construct a user ID to identify your key.\nReal name: Redbrick Committee\nEmail Address: committee@redbrick.dcu.ie\nComment: Redbrick Committee (Redbrick Open Governance {YEAR-MONTH-TYPE_OF_MEETING(AGM/EGM)})\n\nChange (N)ame, (C)omment, (E)mail or (O)kay/(Q)uit? O\n
","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#first-sign","title":"First Sign","text":"

The signatory who has generated the key will then sign this key.

Bash
gpg --sign-key {REDBRICK KEY-ID}\n

You will then publish this public key to a key-server (e.g. keyserver.ubuntu.com or keys.openpgp.org).

Bash
gpg --keyserver keyserver.ubuntu.com --send-key committee@redbrick.dcu.ie\n
","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#second-sign","title":"Second Sign","text":"

The other signatory will pull the key from the key-server and will then sign this key and re-publish the key to the key-server. (You can use the more secure method below for general membership if you wish).

Bash
gpg --keyserver keyserver.ubuntu.com --recv-key {REDBRICK KEY-ID}\n\ngpg --sign-key {REDBRICK KEY-ID}\n\ngpg --keyserver keyserver.ubuntu.com --send-keys {REDBRICK KEY-ID}\n

To verify this procedure has worked and that both signatories have signed it, the first signatory will pull the key back down and verify the signatures.

Bash
gpg --keyserver-options no-self-sigs-only --keyserver keyserver.ubuntu.com --recv-key {REDBRICK KEY-ID}\n
","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#general-membership-sign","title":"General Membership Sign","text":"

If the current committee wishes, the society can publish this key to the general membership so that they may also sign it. The committee will have to provide an email address or another service for the general membership to send files to.

Below is the process for a member of the general membership to sign the key.

Bash
gpg --recv-keys {REDBRICK KEY-ID}\ngpg --sign-key {REDBRICK KEY-ID}\ngpg --armor --export {REDBRICK KEY-ID} | gpg --encrypt -r {REDBRICK KEY-ID} --armor --output {REDBRICK KEY-ID}-signedBy-{OWNER KEY ID}.asc\n

They will then send this file to the signatories.

The signatories will then use the following commands to import and publish their key with the new signature. This must be done before the tagging ceremony.

Bash
gpg -d {REDBRICK KEY-ID}-signedBy-{OWNER KEY ID}.asc  | gpg --import\ngpg --send-key {REDBRICK KEY-ID}\n
","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#2-during-the-tagging-ceremony","title":"2. During the Tagging Ceremony","text":"

The first signatory shall tag the repository with the following command and styling. There shall be at least 2 witnesses separated by commas.

Bash
git tag -as {YYYY-MM-TYPEOFMEETING} {COMMIT ID}\n
Git Tag Message
Co-authored-by: {Signatory 2}\n\nWitnessed-by: ~{WITNESS}\n\nSee `knowledge/tagging.md` for more info.\n

They can then push this tag to GitHub:

Bash
git push --tags origin\n
","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#3-after-the-tagging-ceremony","title":"3. After the Tagging Ceremony","text":"","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#verifying-the-tag","title":"Verifying the Tag","text":"

Clone the git repository

Bash
git clone https://github.com/redbrick/open-governance.git\n

View the tag

Bash
git tag -v {YYYY-MM-TYPEOFMEETING}\n

Import the key

There should be a key signature at the bottom of the tag view. This should be imported into your key-ring. There may be a separate key-server used for the given year's key, so verify with committee that it is on the correct server for importing.

Bash
gpg --keyserver-options no-self-sigs-only --keyserver keyserver.ubuntu.com --recv-key {REDBRICK KEY-ID}\n

Verify the tag

Bash
git tag -v {YYYY-MM-TYPEOFMEETING}\n

Check the signatories

Bash
gpg --list-sigs {REDBRICK KEY-ID}\n

Import the signatories keys

Bash
gpg --list-sigs {REDBRICK KEY-ID} --keyid-format long | grep 'ID not found' | perl -nwe '/([0-9A-F]{16})/ && print \"$1\\n\"' | xargs gpg --keyserver-options no-self-sigs-only --keyserver keyserver.ubuntu.com  --recv-keys\n

Export their key

Bash
gpg --export -a {SIGNATORY KEY-ID}\n

Their key should be available at their GitHub under https://github.com/{USERNAME}.gpg

","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#externally-hosted-repos","title":"Externally Hosted Repos","text":"","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#uploading-the-repo","title":"Uploading the Repo","text":"
  • First verify that the repo is correctly tagged and signed following the previous steps.
  • Download the zip of the tag from GitHub webpage. (Or clone the repo, checkout the tag and zip the folder)
  • Sign the Zip and verify it:
Bash
gpg --sign {NAME OF ZIP}.zip\ngpg --verify {NAME OF ZIP}.zip.gpg\n
  • Export public key:
Bash
gpg --export -a {KEY-ID} > {MYKEYID}\n
  • Upload the .zip.gpg file and your public key
","tags":["open-gov","gpg","tagging"]},{"location":"procedures/open-governance-tagging/#users-verifying-the-hosted-zip","title":"Users Verifying the Hosted Zip","text":"Bash
gpg --import {KEYID}\ngpg --verify {NAME OF ZIP}.zip.gpg\n
  • Exporting the zip file:
Bash
gpg --output {NAME OF ZIP}.zip --decrypt {NAME OF ZIP}.zip.gpg\n
","tags":["open-gov","gpg","tagging"]},{"location":"procedures/policies/","title":"Redbrick System Administrator Policies","text":"

The purpose of this is to brief new Redbrick system administrators on the current setup, policies and practices in place and to serve as the place to record all such information for current and future administrators.

","tags":[]},{"location":"procedures/policies/#admin-account-priviliges","title":"Admin Account Priviliges","text":"
  • By default, all admin accounts will remain the same as the rest of the committee.
  • Each admin will receive a local account on each machine that will be in the root group. This allows you to log on if ldap goes down.
  • Accounts should not be placed into any other 'system' or privileged accounts (e.g. pgSQL, mail, news, etc.) but by all accounts (hah, bad pun!) can be placed into useful groups (e.g. cvs, webgroup, helpdesk etc.)
","tags":[]},{"location":"procedures/policies/#root-account","title":"Root account","text":"

When su'ing to root, please observe the following:

  • Wait for the password prompt before typing in the password! Sometimes lag/terminal freezes or whatever can kick in. The other classic mistake is typing the password in place of the username (say for a console login).
  • Make sure LOGNAME is set to your UNIX name. The Linux boxes will prompt you for this. On OpenBSD you can use 'su -m' to keep the environment.
  • Don't change the root account/finger information!
  • If you wish to use another shell, place customisations in your own file. For bash, /root/.bash_profile.<USERNAME> and for zsh /root/.zshrc.<USERNAME>.

/root/.zshrc and /root/.bash_profile source in the appropriate file as long as $LOGNAME is set right (see above). Do not put personal customisations into the default root account setup, remember other people have to use it.

Common aliases can be put in /root/.profile, familiarise yourself with the existing ones, they can come in handy.

  • Please keep /root tidy. Don't leave stuff strewn about the place!
  • Make sure to check permissions and ownership on files you work on constantly especially files with important or sensitive information in them (e.g. always use cp -p when copying stuff about).
  • Only use root account when absolutely necessary. Many admin tasks can be done or tested first as a regular user.
","tags":[]},{"location":"procedures/policies/#gotchas","title":"Gotchas","text":"

Couple of things to look out for:

  • killall command, never ever use it!
  • Alias cp, mv & rm with the -i option.
  • If you're ever unsure, don't! Ask another admin or check the docs.
  • Always always double check commands before firing them off!
","tags":[]},{"location":"procedures/policies/#admin-mailing-lists","title":"Admin Mailing Lists","text":"

lists.redbrick.dcu.ie (Postorius)

  • All accounts in the root group must be on the admin mailing list and vice versa. Admins who leave/join the root group must be added/removed from the list respectively.
  • Elected Admins should also be on the elected-admins list. This address is mainly used for mail to PayPal, user renewals, registration, and general administration tasks.
  • It is the responsibility of the Elected Admins to ensure that all mailing lists (committee, helpdesk, webmaster, elected-admins, admins, etc) are all up-to-date.
","tags":[]},{"location":"procedures/policies/#admin-account-responsibilities","title":"Admin Account Responsibilities","text":"

As an administrator, your new local account has extra privileges (namely being in the root group).

For this reason, you should not run any untrusted or unknown programs or scripts.

If you must, and source code is available, you should check it before running it. Compile your own versions of other users' programs you use regularly. It is far too easy for other users to trojan your account in this manner and get root.

Do not use passwordless ssh keys on any of your accounts. When using an untrusted workstation (i.e. just about any PC in DCU!) always check for keyloggers running on the local machine and never use any non system or non personal copies of PuTTY/ssh - there's no way of knowing if they have been trojaned.

","tags":[]},{"location":"procedures/policies/#general-responsibilities","title":"General Responsibilities","text":"

Look after and regularly monitor all systems, network, hardware and user requests (ones that fall outside of helpdesk's realm, of course!).

Actively ensure system and network security. We can't police all user accounts and activities, but basic system security is paramount! Keep up to date with bugtraq/securityfocus etc. Check system logs regularly, process listings, network connections, disk usage, etc.

","tags":[]},{"location":"procedures/policies/#downtime","title":"Downtime","text":"

All downtime must be scheduled and notified to the members well in advance by means of motd & announce. If it's really important, a mail to announce-redbrick and socials post may be necessary.

All unexpected/unscheduled downtime (as a result of a crash or as an emergency precaution) must be explained to the members as soon as possible after the system is brought back. A post to announce, notice in motd or possibly a mail to committee/admins is sufficient.

When performing a shutdown, start the shutdown 5 or 10 minutes in advance of the scheduled shutdown time to give people a chance to logout. It may also be useful to disable logins at this stage with a quick explanation in /etc/nologin.
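For example, a hedged sketch of a scheduled shutdown (times and wording are illustrative):

Bash
echo 'Back at 19:00 - scheduled maintenance' > /etc/nologin # disable logins with a quick explanation\nshutdown -h +10 'Going down for scheduled maintenance in 10 minutes, please log out'\n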

","tags":[]},{"location":"procedures/policies/#documentation","title":"Documentation","text":"

Please read all the documentation before you do anything, but remember that the docs aren't complete and are sometimes out of date. Please update them as you go :D

","tags":[]},{"location":"procedures/post-powercut/","title":"Post-powercut Todo List","text":"

A list of things that should be done/checked immediately after a power cut:

  • Ensure the aperture servers have the correct IP addresses:
    • eno1 should have the internal IP address (10.10.0.0/24) - this should be reserved by DHCP on mordor
    • eno2 should have no IP address
    • br0 should have the external IP address (136.206.16.0/24) - this should also be reserved by DHCP on mordor
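A quick way to check this on each aperture server (interface names as listed above):

Bash
ip addr show eno1 # expect the internal 10.10.0.x address\nip addr show eno2 # expect no IP address\nip addr show br0 # expect the external 136.206.16.x address\n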
  • If the bastion-vm fails to start, check:
    • /storage is mounted rw on each aperture server
    • br0 is present and configured on each aperture server
    • vm-resources.service.consul is running and http://vm-resources.service.consul:8000/bastion/bastion-vm-latest.qcow2 is accessible
    • if the latest symlink points to a corrupted image, ln -sf it to an earlier one
  • All the nixos boxes rely on DNS for LDAP and NFS:
    • Make sure bind is running on paphos
    • mount /storage
    • systemctl restart httpd, php-fpm-rbusers-* and ldap
  • Apache on hardcase sometimes tries to start before networking has finished starting. To fix it, disable and re-enable it a few times; this usually makes it come up.
  • Mailman on hardcase has a lock file at /var/lib/mailman/lock/master.lck. If it doesn't shut down correctly, this lock file will block mailman from starting up. Remove it with:
Bash
rm /var/lib/mailman/lock/master.lck\n
  • paphos is old and sometimes its time will become out of sync. To make sure its time is accurate, run:
Bash
sudo service ntp restart\n

and ensure you have the correct time with date

","tags":["powercut","todo"]},{"location":"procedures/update-wp-domain/","title":"Update a WordPress Domain - wizzdom, distro","text":"

Redbrick hosts a variety of services and websites for various clubs and societies in DCU. Oftentimes these websites hosted for societies run on WordPress due to its ease of use.

However, what happens when you no longer have access to the domain? You can change the domain on the webserver, but WordPress will redirect you to the old domain. In this case you must update the database to change the domain. This happened with TheCollegeView in 2023; you can read more about that here

","tags":[]},{"location":"procedures/update-wp-domain/#sql-commands","title":"SQL Commands","text":"

BACKUPS!!!

Ensure you have a recent backup of the database by checking /storage/backups

  • First, check what the current value is
SQL
-- validate current setting\nselect option_name,option_value from wp_2options where( option_name=\"siteurl\" or option_name=\"home\");\n
  • Now, update the option with the new value
SQL
-- update to new value\nupdate wp_2options set option_value=\"http://www.thecollegeview.redbrick.dcu.ie\" where( option_name=\"siteurl\" or option_name=\"home\");\n
  • Verify that the new value is set correctly
SQL
-- validate new value\nselect option_name,option_value from wp_2options where( option_name=\"siteurl\" or option_name=\"home\");\n
  • Now, the same again but for the post content and guid
SQL
-- update post content with new domain\nupdate wp_2posts set post_content = replace(post_content,\"://www.thecollegeview.com/\",\"://thecollegeview.redbrick.dcu.ie/\");\n\n-- update the guid with the new domain\nupdate wp_2posts set guid = replace(guid,\"://www.thecollegeview.com/\",\"://thecollegeview.redbrick.dcu.ie/\");\n
","tags":[]},{"location":"procedures/vpn/","title":"Admin VPN","text":"

The admin VPN is set up to allow admins to access the network from outside of DCU, giving them an IP address on the internal network for troubleshooting, testing and integrating.

If you just want to create a new client configuration, go here: adding a new client

","tags":[]},{"location":"procedures/vpn/#setup","title":"Setup","text":"

OpenVPN was installed using this script on glados.

","tags":[]},{"location":"procedures/vpn/#adding-a-new-client","title":"Adding a New Client","text":"

To add a new client, run the following command (as root) on Glados:

Bash
bash /root/ovpn/openvpn-install.sh\n

You will be prompted to add a new client; enter a name for the client and the script will generate a new client config.

It will be saved in /root/[client name].ovpn.

","tags":[]},{"location":"procedures/vpn/#revoking-a-client","title":"Revoking a Client","text":"

To revoke a client, run the following command (as root) on Glados:

Bash
bash /root/ovpn/openvpn-install.sh\n

You will be prompted to revoke a client; enter the name of the client you want to revoke.

","tags":[]},{"location":"procedures/vpn/#connecting-to-the-vpn","title":"Connecting to the VPN","text":"

To connect to the VPN, you will need to download the client configuration file from glados and then import it into your OpenVPN client.
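For example, a rough sketch from a Linux command-line client (the client name is a placeholder, and copying the file with scp assumes you have the relevant access to glados):

Bash
scp root@glados:/root/CLIENT_NAME.ovpn .\nsudo openvpn --config CLIENT_NAME.ovpn\n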

","tags":[]},{"location":"services/","title":"Preface","text":"

Here you will find a list of all the services Redbrick runs, along with some configs and some important information surrounding them.

  • api
  • bastion-vm
  • bind
  • md
  • consul
  • gitea
  • irc
  • nfs
  • nomad
  • traefik
  • znapzend
","tags":[]},{"location":"services/#adding-more-services","title":"Adding More Services","text":"

In order to add a new service, you will need to edit the docs repository.

Adding a new service is as easy as creating a new file in docs/services/ with an appropriate name, and the page will be automatically added to the navigation pane.

Try to keep file names short and concise, limited to one word if possible and avoid using spaces.

The style guide for a service file should be as follows:

Markdown
---\ntitle: ServiceName\nauthor:\n  - username\ntags:\n  - relevant\n  - tags\n\n---\n\n# ServiceName - `username`\n\nShort description on how the service works and where it is running\n\n## Configuration\n\nAdd some possible useful configs here, like a docker-compose file,\ncertain command you may have had to run, or something that is not very obvious.\nLook at other services for hints on this.\n
","tags":[]},{"location":"services/api/","title":"Redbrick Administrative Web API","text":"

The source code for the API can be found here.

The Redbrick web API serves as an easy interface to carry out administrator tasks (mainly LDAP related), and for use in automation. This saves time compared to accessing machines and formulating and executing manual LDAP queries or scripts.

The server code for the API is hosted on aperture in a docker container deployed with nomad, the job file for which is here. It is written in Python with FastAPI. This container is then served to the public using traefik.

","tags":["services","api","ldap"]},{"location":"services/api/#nomad-job-file","title":"Nomad Job File","text":"

The nomad job for Redbrick's API is similar to most other web servers for the most part. As always, all secrets are stored in consul. Some things to watch out for are:

  • The docker image on ghcr.io is private and therefore requires credentials to access.
Nomad
auth {\n    username = \"${DOCKER_USER}\"\n    password = \"${DOCKER_PASS}\"\n}\n
Nomad
template {\n  data        = <<EOH\nDOCKER_USER={{ key \"api/ghcr/username\" }}\nDOCKER_PASS={{ key \"api/ghcr/password\" }}\n...\nEOH\n
  • The docker container must access /home and /storage on icarus to configure users' home directories and webtrees. This is mounted onto the aperture boxes at /oldstorage and is mounted to the containers like this:
Nomad
volumes = [\n          \"/oldstorage:/storage\",\n          \"/oldstorage/home:/home\",\n]\n
  • The container requires the LDAP secret at /etc/ldap.secret to auth with LDAP. This is stored in consul, placed in a template and mounted to the container like this:
Nomad
template {\n    destination = \"local/ldap.secret\"\n    data = \"{{ key \\\"api/ldap/secret\\\" }}\" # this is necessary as the secret has no EOF\n    }\n
  • The container is quite RAM intensive, regularly using 700-800MB. The job has been configured to allocate 1GB RAM to the container so it does not OOM. The default cpu allocation of 300 is fine.
Nomad
resources {\n    cpu = 300\n    memory = 1024\n    }\n
","tags":["services","api","ldap"]},{"location":"services/api/#reference","title":"Reference","text":"

For the most up to date, rich API reference please visit https://api.redbrick.dcu.ie/docs

All requests are validated with Basic Auth for access. See example.
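For a quick check from the command line (credentials are placeholders):

Bash
curl -u USERNAME:PASSWORD https://api.redbrick.dcu.ie/users/USERNAME_HERE\n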

| Method | Route | URL Parameters | Body |
| --- | --- | --- | --- |
| GET | /users/username | username - Redbrick username | N/A |
| PUT | /users/username | username - Redbrick username | ldap_key |
| POST | /users/register | N/A | ldap_value |
","tags":["services","api","ldap"]},{"location":"services/api/#examples","title":"Examples","text":"
  • GET a user's LDAP data
Python
import requests\n\nurl = \"https://api.redbrick.dcu.ie/users/USERNAME_HERE\"\n\nheaders = {\n  'Authorization': 'Basic <ENCODED_USERANDPASS_HERE>'\n}\n\nresponse = requests.request(\"GET\", url, headers=headers)\n\nprint(response.text)\n
  • PUT a user's LDAP data to change their loginShell to /usr/local/shells/zsh
Python
import requests\nimport json\n\nurl = \"https://api.redbrick.dcu.ie/users/USERNAME_HERE\"\npayload = json.dumps({\n  \"ldap_key\": \"loginShell\",\n  \"ldap_value\": \"/usr/local/shells/zsh\"\n})\nheaders = {\n  'Authorization': 'Basic <ENCODED_USERANDPASS_HERE>',\n  'Content-Type': 'application/json'\n}\n\nresponse = requests.request(\"PUT\", url, headers=headers, data=payload)\n\nprint(response.text)\n
","tags":["services","api","ldap"]},{"location":"services/api/#important-notes-and-caveats","title":"Important Notes and Caveats","text":"

As the FastAPI server for the API is hosted inside of a Docker container, there are limitations to the commands we can execute that affect the \"outside\" world.

This is especially important with commands that rely on LDAP.

For example inside the ldap-register.sh script used by the /register endpoint.

  • Commands like chown which require a user group or user to be passed to them will not work because they cannot access these users/groups in the container.

  • This is prevalent in our implementation of the API that creates and modifies users' webtree directory.

How do we fix this?

Instead of relying on user/group names for the chown command, it is advisable to use their unique IDs.

Bash
# For example, the following commands are equivalent.\nchown USERNAME:member /storage/webtree/U/USERNAME\n\nchown 13371337:103 /storage/webtree/U/USERNAME\n# Where 13371337 is userid and 103 is the id for the 'member' group.\n

Note that USERNAME can be used to refer to the user's web directory here since it is the name of the directory and doesn't refer to the user object.

","tags":["services","api","ldap"]},{"location":"services/bastion-vm/","title":"Bastion VM","text":"

This VM is an ephemeral machine that can be placed on any nomad client that has the qemu driver enabled.

It acts as the point of ingress for Aperture, with ISS and our mordor allowing traffic to reach its IP address externally. The VM is configured as a Nomad client itself, in the ingress node pool to ensure that only ingress-type allocations are placed there (like traefik). Those services can proxy requests from the Bastion VM to internal services using consul's service DNS resolution, its service mesh, or by plain IP and port.

cloud-init is given a static address during the initialisation phase to configure the interface. This ensures that, even if it is replanned, it will be able to accept traffic.

The base image that the VM uses is a Debian 12 qcow file. After all configuration was done, the size of the image is ~3.2GB. The image can be used to create replicas of the ingress on other external IP addresses, creating more availability if needed.

","tags":["aperture","services","nomad","vm","ingress"]},{"location":"services/bastion-vm/#steps-to-deploy","title":"Steps to Deploy","text":"

You'll need to ensure the hosts have a bridge device configured to ensure that the networking aspect of the VM can function. See the redbrick/nomad repo for more information about the steps needed for that.

You'll need a webserver to serve the cloud-init configs. There may be another solution to this in the near future, but for now, wheatley:/home/mojito/tmp/serve contains the configurations.
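
A minimal way to serve that directory (port chosen to match the smbios URL used for the user VMs elsewhere in these docs; assumes nothing else is bound to it):
Bash
cd /home/mojito/tmp/serve\npython3 -m http.server 8000\n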

Plan the Nomad job and wait for the allocation to be created. If you used the correct image (for example a backup of the qcow file) the virtual machine should be configured and should connect as normal to the Consul and Nomad clusters and become eligible for allocations. If you started from scratch, then use the ansible/redbrick-ansible.yml playbook in the redbrick/nomad repo and ensure that the hosts file is up to date.

For security's sake, there is no root login and no user accounts on the bastion VM. This is an attempt to make the node more secure. If you need to make changes, you should change the base image and apply that. The fewer vulnerabilities that are discovered on the bastion VM, the happier we can keep ISS and the safer Redbrick will be.

","tags":["aperture","services","nomad","vm","ingress"]},{"location":"services/bind/","title":"Bind9 - distro, ylmcc, wizzdom","text":"

bind9 is our DNS provider. Currently it runs on paphos, but this will change in the near future.

","tags":["services","dns"]},{"location":"services/bind/#configuration","title":"Configuration","text":"

The config files for bind are located in /etc/bind/master/. The most important files in this directory are:

  • db.Redbrick.dcu.ie
  • db.Rb.dcu.ie
  • various other files for other socs and members

Warning

You must never update these files without following the steps below first!

","tags":["services","dns"]},{"location":"services/bind/#updating-dns","title":"Updating DNS","text":"

To update DNS:

  1. Change directory to /etc/bind/master
Bash
cd /etc/bind/master\n
  2. Back up the db.Redbrick.dcu.ie file, usually to db.Redbrick.dcu.ie.bak
Bash
cp db.Redbrick.dcu.ie{,.bak}\n
  3. Stop changes to the file affecting DNS while you edit it
Bash
rndc freeze redbrick.dcu.ie\n
  4. Edit db.Redbrick.dcu.ie
  5. Before changing any DNS entry in the file, you must first update the serial number on line 4. You can increment it by one if you want, or follow the format YYYYMMDDrev, where rev is the revision number. For example:
db.Redbrick.dcu.ie
2024033106 ; serial\n
  6. Once you are happy with your file, you can check it with:
Bash
named-checkzone redbrick.dcu.ie db.Redbrick.dcu.ie\n
  7. If this returns no errors, you are free to thaw the DNS freeze:
Bash
rndc thaw redbrick.dcu.ie\n
  8. Check the status of bind9:
Bash
service bind9 status\n
  9. You can access more logs from bind9 by checking /var/log/named/default.log:
Bash
tail -n 20 /var/log/named/default.log\n
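
To confirm the new serial is actually being served, you can query the server directly (run on the DNS host itself):
Bash
dig @localhost redbrick.dcu.ie SOA +short\n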

Note

Once you have verified that everything is working properly, add your changes and commit them to git.

","tags":["services","dns"]},{"location":"services/consul/","title":"Consul","text":"","tags":[]},{"location":"services/exposed/","title":"Services Exposed to the Internet - wizzdom","text":"

Firstly, it's important to mention that Redbrick is currently split into 2 parts:

  • Redbrick 2.0 a.k.a. \"old redbrick\" (on 136.206.15.0/24)
  • New Redbrick which includes Aperture (on 136.206.16.0/24)

","tags":["services","exposed"]},{"location":"services/exposed/#old-redbrick","title":"Old Redbrick","text":"
  • motherlode - 136.206.15.250
    • OS: NixOS 22.05
    • Services:
      • VM for dcuclubsandsocs.ie
  • hardcase - 136.206.15.3
    • OS: NixOS 22.05
    • Services:
      • apache httpd:
        • websites from the webtree (including, but not limited to):
          • all users' websites <user>.redbrick.dcu.ie
          • other websites are mentioned in the nix-configs repo
        • legacy websites (pretty much anything that isn't dockerized)
        • thecollegeview.ie
        • thelookonline.dcu.ie
      • email (postfix and dovecot)
      • mailing lists (mailman)
      • *.redbrick.dcu.ie also points here
  • paphos - 136.206.15.53
    • OS: Ubuntu 14.04 LTS
    • Services:
      • DNS (bind)
","tags":["services","exposed"]},{"location":"services/exposed/#new-redbrick","title":"New Redbrick","text":"
  • azazel - 136.206.16.24
    • OS: Debian 12 bookworm
    • Services:
      • primary ssh login box for users (see Logging in)
      • jump-box for admins
  • pygmalion - 136.206.16.25
    • OS: Debian 12 bookworm
    • Services:
      • secondary ssh login box for users (see Logging in)
      • jump-box for admins
","tags":["services","exposed"]},{"location":"services/exposed/#aperture","title":"Aperture","text":"

In aperture, things are done a little differently than on the other network. Instead of having a single host per service, aperture is configured to allow services to be allocated dynamically across all 3 servers using nomad, consul and traefik.

  • glados - 136.206.16.4
  • wheatley - 136.206.16.5
  • chell - 136.206.16.6
  • all 3 boxes are identical
  • OS: Debian 11 bullseye
  • Services:
    • simple nginx containers with the mascot of each server in aperture:
      • glados
      • wheatley
      • chell
    • the amikon.me website for DCU AMS in an nginx container
    • timetable.redbrick.dcu.ie a timetable that actually works, 10x better than the official DCU timetable
    • Redbrick main site redbrick.dcu.ie
    • HedgeDoc at: md.redbrick.dcu.ie
    • Admin API at: api.redbrick.dcu.ie
    • Wetty at: wetty.redbrick.dcu.ie
    • DCU Solar Racing Website solarracing.ie
    • Redbrick Password Vault (Vaultwarden) at: vault.redbrick.dcu.ie
    • URL Shortener
    • Plausible Analytics at plausible.redbrick.dcu.ie
  • Notes:
    • All web traffic is routed through traefik on the bastion VM
    • All new services will be deployed here
    • Most services here are deployed as docker containers but there's no reason you couldn't use any of the other nomad drivers
    • For more information see redbrick's Nomad repo
","tags":["services","exposed"]},{"location":"services/gitea/","title":"Gitea","text":"

Redbrick uses Gitea as an open source git host.

  • Gitea docs
  • Gogs docs, not really important, but Gitea is built on Gogs
  • Link to Redbrick deployment
","tags":[]},{"location":"services/gitea/#deployment","title":"Deployment","text":"

Gitea and its database are deployed to Hardcase, which runs NixOS.

  • The actual repositories are stored in /zroot/git and most other data is stored in /var/lib/gitea
  • The SECRET_KEY and INTERNAL_TOKEN_URI are stored in /var/secrets. They are not automatically created and must be copied when setting up new hosts. gitea_token.secret must have permissions 740 and be owned by git:gitea.
  • Make sure that the gitea_token.secret does NOT have a newline character in it (a quick check is shown below).
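
One quick way to check for a trailing newline (path assumed from the notes above):
Bash
# prints a warning if the secret ends with a newline (or is empty)\n[ -z \"$(tail -c 1 /var/secrets/gitea_token.secret)\" ] && echo WARNING: trailing newline present\n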
","tags":[]},{"location":"services/gitea/#other-notes","title":"Other Notes","text":"

The Giteadmin credentials are in the passwordsafe.

","tags":[]},{"location":"services/gitea/#operation","title":"Operation","text":"

Gitea itself is very well documented. Here are a couple of special commands that are useful when deploying/migrating Gitea to a different host.

Bash
# Regenerate hooks which fixes push errors\n/path/to/gitea admin regenerate hooks\n\n# If you didn't copy the authorized_keys folder then regen that too\n/path/to/gitea admin regenerate keys\n
","tags":[]},{"location":"services/icecast/","title":"Icecast","text":"

Icecast is a streaming server that we currently host on aperture.

We stream DCUfm's broadcasts to their apps via a stream served at dcufm.redbrick.dcu.ie.

","tags":[]},{"location":"services/icecast/#procedure","title":"Procedure","text":"

The configuration file for icecast is located in the nomad config repo.

It should just be a case of running nomad job plan clubs-socs/dcufm.hcl to plan the job and then nomad job run to deploy it, as shown below.
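
For example (take the check-index value from the plan output):
Bash
nomad job plan clubs-socs/dcufm.hcl\nnomad job run -check-index <id-from-plan> clubs-socs/dcufm.hcl\n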

Note

The job may bind to either the internal or external address. Ensure that if you make a change to the config, you inform DCUfm that they may need to switch which server they use.

","tags":[]},{"location":"services/icecast/#streaming-to-icecast","title":"Streaming to Icecast","text":"

DCUfm use butt on a desktop in their studio to stream to Icecast.

The desktop must be connected to the VPN to ensure the stream stays up, and traefik doesn't reset the connection every 10 seconds. The current icecast configuration for the server is 10.10.0.5:2333 or 136.206.16.5:2333 (see above note).

Read more about it in this issue.

A shortcut to the VPN is available on the desktop (a shortcut to the binary, modified to include --connect profile.ovpn).

See here.

","tags":[]},{"location":"services/icecast/#dcufm-cheat-sheet","title":"DCUfm Cheat Sheet","text":"

This is a cheat sheet for DCUfm to help them stream to icecast.

","tags":[]},{"location":"services/icecast/#connecting-to-the-vpn","title":"Connecting to the VPN","text":"

You'll need to connect to the Redbrick VPN to stream to icecast. You can do this by double clicking the shortcut on the desktop.

You'll then need to go to bottom right corner of the screen and right click this icon:

A popup will appear; click Connect. This will connect you to the VPN. It may take a second: a window will pop up with a lot of text, the VPN will connect, and then the window will close.

You should end up with an icon like this:

You're now connected to the VPN.

","tags":[]},{"location":"services/icecast/#connecting-to-icecast","title":"Connecting to Icecast","text":"

You'll need to connect to icecast to stream to it. BUTT is the software we use to stream to icecast. You'll also find this on the desktop. Once it's open (and you're connected to the VPN), press the small \"play\" button in the top left corner. This will start your stream to the server.

The username and password should already be configured in the software. If not, ask a redbrick sysadmin for the login details.

Warning!

If you find that butt is not connecting, then you may need to switch which server you're connecting to. To do this, go to settings, and then the \"Main\" tab. In the dropdown, select either DCUfm 1 or DCUfm 2 (try both, one will definitely work).

","tags":[]},{"location":"services/icecast/#saving-your-stream","title":"Saving Your Stream","text":"

Your stream will be saved automatically onto the desktop into a folder called Recordings YYYY (where YYYY is the current year), named with the date and time of the recording, in .mp3 format. Take this file with you (via a USB drive or similar) if you want to keep it for later; it will not be kept on the desktop for long!

","tags":[]},{"location":"services/icecast/#further-information","title":"Further Information","text":"

If you have any questions, please ask a redbrick sysadmin.

","tags":[]},{"location":"services/irc/","title":"IRC","text":"","tags":[]},{"location":"services/irc/#redbrick-inspircd","title":"Redbrick InspIRCd","text":"

In 2016/2017 we began work to move to InspIRCd. This was due to the complications in ircd-hybrid and how old it was. These complications stopped new netsocs joining us, so we all agreed to move IRC. Four years and multiple attempts later we had still not migrated, until TCD decided to shut down their server, breaking the network.

We run Inspircd v3 on Metharme. InspIRCd's docs can be found here for configuration specifics.

IRC is available at irc.redbrick.dcu.ie on port 6697. SSL is required for connections; we do not support non-SSL. When connecting from a Redbrick server, a user will be automatically logged in. If connecting from an external server, a user must pass their password on login.

For the purpose of external peering with other servers, port 7001 is exposed as well. As with clients, we only support SSL on this port.

For docs on connecting and using an IRC client please refer to the wiki.

","tags":[]},{"location":"services/irc/#installation","title":"Installation","text":"

InspIRCd is installed with Nix. There is no Nix package for InspIRCd so we compile a specific git tag from source. See Nix package for details on how it is compiled. Given we only support SSL and require LDAP, we need to enable both at compile time.

","tags":[]},{"location":"services/irc/#configuration","title":"Configuration","text":"

InspIRCd's configuration is in Nix here. This config is converted to XML on disk.

","tags":[]},{"location":"services/irc/#important-configuration","title":"Important Configuration","text":"
  • oper is a list of admin users on the irc server. Their OPER password will need to be manually hashed with hmac-sha256, and placed in a secret on the server to be read in by inspircd.
  • ldapwhitelist is a list of CIDR addresses that do not require authentication. The list consists of Redbrick public and private addresses as well as oldsoc.
  • link is a list of all servers we peer with including the anope services server that runs on the same box.
","tags":[]},{"location":"services/irc/#oldsocnet","title":"oldsoc.net","text":"

oldsoc.net is a server run by old TCD netsocers. All the users on it are the remaining TCD associates following the shutdown of TCD IRCd. This server is maintained by its own users and has explicit permission to join IRC without LDAP auth.

","tags":[]},{"location":"services/irc/#anope","title":"Anope","text":"

Redbrick runs Anope services for the entire network. As with InspIRCd, we compile from source. Refer to Anope's GitHub docs for configuration specifics.

Our current Anope is configured with standard mods of chanserv, nickserv and operserv. All config is in here.

Anope stores all info in a custom db file on disk.

","tags":[]},{"location":"services/irc/#discord-bridge-butlerx","title":"Discord Bridge - butlerx","text":"

We run a bridge between the Redbrick Discord and irc. The configuration for this is here.

The bridge adds all users from Discord with the suffix _d2, and all IRC users appear as themselves but tagged as a bot in Discord. Not all Discord channels are on IRC; the config above contains a mapping of IRC channels to Discord channel IDs. This needs to be manually updated to add more channels.

","tags":[]},{"location":"services/ldap/","title":"LDAP - m1cr0man","text":"

LDAP is our directory service. It stores usernames, passwords, UIDs, quotas, and other user specific info.

LDAP's structure is different to most other database systems. If you are not familiar with it, I recommend investing some time into looking at how schemas and distinguished names work.

","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#deployment","title":"Deployment","text":"
  • OpenLDAP is deployed with Nix to Daedalus and Icarus
  • Daedalus is the master, Icarus is slaved to it and can be used as a read only failover
  • ldap.internal and ldap2.internal are slaved to Daedalus + Icarus respectively
  • Both servers store their data in /var/db/openldap
  • The ldap.secret, which should ALWAYS have permissions 400, and owned by the openldap user, is stored in /var/secrets. It is not automatically created and must be copied when setting up new hosts
  • rb-ldap and useradm are wrappers around LDAP that are custom built
","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#redbrick-special-notes","title":"Redbrick Special Notes","text":"
  • The root user password is in the passwordsafe
  • The OID for most of the schema is DCU's
  • The configs that exist for NixOS were mostly ported from our last LDAP server (paphos) to maintain compatibility
  • At the time of writing, LDAP is not configured with TLS
  • There are 2 scripts to manage quotas on /storage that run on the server serving NFS (zfsquota and zfsquotaquery). They are covered under the NFS documentation.
  • There's a user in ldap called testing, for testing. The password is in pwsafe.
","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#operation","title":"Operation","text":"

The ldap* suite of commands can be used to manage LDAP. Their man pages are very well documented, but we've provided most common operations below.

Note that the ldap.secret file contains a crypted (hashed) password, and is not the plain-text password you need to run the ldap commands.
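
If you need to create a password file for the -y flag used below, it should contain only the plain-text root password with no trailing newline (path illustrative):
Bash
printf '%s' 'ROOT_PASSWORD_HERE' > /root/ldap-pw.txt\nchmod 400 /root/ldap-pw.txt\n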

","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#ldapsearch-recipes","title":"Ldapsearch Recipes","text":"

ldapsearch can be used with and without authenticating as root. Without root, some fields (such as the password hash, altmail) will be hidden.

Bash
# Dump the entire LDAP database in LDIF form, which can be used as a form of backup\nldapsearch -b o=redbrick -xLLL -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt\n\n# Find a user by name, and print their altmail\nldapsearch -b o=redbrick -xLLL -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt uid=m1cr0man altmail\n\n# Find quotas for all users edited by m1cr0man\nldapsearch -b o=redbrick -xLLL updatedby=m1cr0man quota\n\n# Find all member's usernames\nldapsearch -b o=redbrick -xLLL objectClass=member uid\n\n# Find all expired users. Notice here that you can query by hidden fields, but you can't read them\nldapsearch -b o=redbrick -xLLL 'yearsPaid < 1' uid\n
","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#ldapmodify-recipes","title":"Ldapmodify Recipes","text":"

You can instead pass a file with -f when necessary.

To test a command add -n for no-op mode.

Updating the updatedby and updated fields is included in each command as good practice.

Bash
# Add quota info to a user\nldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt << EOF\ndn: uid=testing,ou=accounts,o=redbrick\nchangetype: modify\nadd: quota\nquota: 3G\n-\nreplace: updatedby\nupdatedby: $USER\n-\nreplace: updated\nupdated: $(date +'%F %X')\nEOF\n\n# Change a user's shell\nldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt << EOF\ndn: uid=testing,ou=accounts,o=redbrick\nchangetype: modify\nreplace: loginShell\nloginShell: /usr/local/shells/disusered\n-\nreplace: updatedby\nupdatedby: $USER\n-\nreplace: updated\nupdated: $(date +'%F %X')\nEOF\n\n# Update yearsPaid\nldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt << EOF\ndn: uid=testing,ou=accounts,o=redbrick\nchangetype: modify\nreplace: yearsPaid\nyearsPaid: 1\n-\nreplace: updatedby\nupdatedby: $USER\n-\nreplace: updated\nupdated: $(date +'%F %X')\nEOF\n
","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#ldapadd-recipes","title":"Ldapadd Recipes","text":"

Occasionally you'll need to add people or things to ldap manually, such as a user you're recreating from backups, or a reserved system name such as a new machine. This is where ldapadd comes in.

Bash
# Create a file to read the new entry from\ncat > add.ldif << EOF\ndn: uid=redbrick,ou=reserved,o=redbrick\nuid: redbrick\ndescription: DNS entry\nobjectClass: reserved\nobjectClass: top\nEOF\n\n# Import the ldif\nldapadd -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt -f add.ldif\n\n# Note if you are importing a full ldif onto a new server, use slapadd instead\n# Ensure slapd is not running first\nslapadd -v -l backup.ldif\n
","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#other-recipes","title":"Other Recipes","text":"

On a yearly basis, the yearsPaid field must be decremented for every user, and last year's newbies need to be marked as no longer newbies.

Remember to take off -n when you are ready to rock.

Adding the updated and updatedby fields from above to these queries would be a good idea.

Bash
# Decrement yearsPaid\n# WARNING NOT IDEMPOTENT, RUN ONCE\nldapsearch -b o=redbrick -xLLL -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt objectClass=member yearsPaid |\\\ntee yearsPaid-$(date +'%F').backup.ldif |\\\nawk '/yearsPaid/ { print \"changetype: modify\\nreplace: yearsPaid\\nyearsPaid: \" $2 - 1 } ! /yearsPaid/ {print $0}' |\\\nldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt -n\n\n# De-newbie last year's users\nldapsearch -b o=redbrick -xLLL -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt newbie=TRUE dn |\\\ntee newbie-$(date +'%F').backup.ldif |\\\nawk '/^dn/ {print $0\"\\nchangetype: modify\\nreplace: newbie\\nnewbie: FALSE\\n\"}' |\\\nldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt -n\n\n# Set quotas of users without quotas\nldapsearch -b o=redbrick -xLLL '(&(objectClass=posixAccount)(!(quota=*)))' dn |\\\nawk '/^dn/ {print $0\"\\nchangetype: modify\\nadd: quota\\nquota: 2G\\n\"}' |\\\nldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt -n\n
","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#troubleshooting","title":"Troubleshooting","text":"

First off, it's worth calling out that if you are coming here to find help with a client side issue, chances are the DNS rule applies:

It's probably not LDAP

With that out of the way, here's some things to check - in order.

","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#check-reachability-of-ldap","title":"Check Reachability of LDAP","text":"

Run the command below from the master and also from the problem client. It should return m1cr0man's details. If you get an invalid credentials or object not found error, check that the LDAP auth config hasn't changed. If you get a connection error, restart the service.

Bash
ldapsearch -h ldap.internal -p 389 -xLLL -b o=redbrick uid=m1cr0man\n
","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#verify-ldap-can-be-written-to","title":"Verify LDAP Can Be Written to","text":"

Get the password from the passwordsafe. Run this from the master.

Bash
ldapmodify -D cn=root,ou=ldap,o=redbrick -x -y filewithpwd.txt << EOF\ndn: uid=m1cr0man,ou=accounts,o=redbrick\nchangetype: modify\nreplace: quota\nquota: 3G\nEOF\n

Run the command from the first troubleshooting step to verify the value changed.

If it fails with an auth issue, triple check your password file (it should contain the plain text password). If it fails with a non-auth issue, then check the service logs.

","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#enable-debug-logging","title":"Enable Debug Logging","text":"

OpenLDAP produces a nice set of logs when the loglevel is not set.

Remove loglevel from extraConfig in the Nix config and switch, then run this command to tail the logs:

Bash
journalctl -fu openldap\n
","tags":["ldap","icarus","daedalus"]},{"location":"services/ldap/#re-syncing-secondary-ldap-servers","title":"Re-syncing Secondary LDAP Server(s)","text":"

In the event a secondary server becomes out of sync with the master, it can be re-synced by stopping the server, deleting its database files, and then restarting the server. Do this only after ensuring that config.redbrick.ldapSlaveTo is set correctly.
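
A minimal sketch of that procedure, assuming the service name and data directory described above (run on the secondary, not the master):
Bash
systemctl stop openldap\nrm -rf /var/db/openldap/*\nsystemctl start openldap\n# replication from the master repopulates the database\n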

","tags":["ldap","icarus","daedalus"]},{"location":"services/md/","title":"HedgeDoc - wizzdom","text":"

HedgeDoc is deployed with nomad on aperture as a docker container. It is accessible through md.redbrick.dcu.ie.

HedgeDoc auths against LDAP and its configuration is available here

All sensitive variables are stored in the consul KV store.
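
For example, to write or read one of these values (key names as used in the templates below), from a host connected to the cluster:
Bash
consul kv put hedgedoc/db/password 'SOME_PASSWORD'\nconsul kv get hedgedoc/db/user\n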

The important points are as follows:

  • connecting to the database:
Nomad
CMD_DB_URL = \"postgres://{{ key \"hedgedoc/db/user\" }}:{{ key \"hedgedoc/db/password\" }}@{{ env \"NOMAD_ADDR_db\" }}/{{ key \"hedgedoc/db/name\" }}\"\n
  • disabling anonymous users and email signup:
Nomad
CMD_ALLOW_EMAIL_REGISTER = \"false\"\nCMD_ALLOW_ANONYMOUS      = \"false\"\nCMD_EMAIL                = \"false\"\n
  • LDAP configuration:
Nomad
CMD_LDAP_URL             = \"{{ key \"hedgedoc/ldap/url\" }}\"\nCMD_LDAP_SEARCHBASE      = \"ou=accounts,o=redbrick\"\nCMD_LDAP_SEARCHFILTER    = \"{{`(uid={{username}})`}}\"\nCMD_LDAP_PROVIDERNAME    = \"Redbrick\"\nCMD_LDAP_USERIDFIELD     = \"uidNumber\"\nCMD_LDAP_USERNAMEFIELD   = \"uid\"\n

See the HedgeDoc docs for more info on configuration.

","tags":["aperture","nomad","docker"]},{"location":"services/md/#backups","title":"Backups","text":"

The HedgeDoc database is backed up periodically by a nomad job, the configuration for which is here.

The bulk of this job is this script which:

  • grabs the alloc_id of the currently running HedgeDoc allocation from nomad
  • execs into the container running pg_dumpall dumping the database into a file with the current date and time
  • if the backup is unsuccessful the script notifies the admins on discord via a webhook.
Bash
#!/bin/bash\n\nfile=/storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc-$(date +%Y-%m-%d_%H-%M-%S).sql\n\nmkdir -p /storage/backups/nomad/postgres/hedgedoc\n\nalloc_id=$(nomad job status hedgedoc | grep running | tail -n 1 | cut -d \" \" -f 1)\n\njob_name=$(echo ${NOMAD_JOB_NAME} | cut -d \"/\" -f 1)\n\nnomad alloc exec -task hedgedoc-db $alloc_id pg_dumpall -U {{ key \"hedgedoc/db/user\" }} > \"${file}\"\n\nfind /storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc* -ctime +3 -exec rm {} \\; || true\n\nif [ -s \"$file\" ]; then # check if file exists and is not empty\n  echo \"Backup successful\"\n  exit 0\nelse\n  rm $file\n  curl -H \"Content-Type: application/json\" -d \\\n  '{\"content\": \"<@&585512338728419341> `PostgreSQL` backup for **'\"${job_name}\"'** has just **FAILED**\\nFile name: `'\"$file\"'`\\nDate: `'\"$(TZ=Europe/Dublin date)\"'`\\nTurn off this script with `nomad job stop '\"${job_name}\"'` \\n\\n## Remember to restart this backup job when fixed!!!\"}' \\\n  {{ key \"postgres/webhook/discord\" }}\nfi\n
","tags":["aperture","nomad","docker"]},{"location":"services/nfs/","title":"NFS / Network File Storage","text":"

NFS is used to serve the notorious /storage directory on Icarus to all of Redbrick's machines, which in turn serves /home, /webtree and some other critical folders.

","tags":[]},{"location":"services/nfs/#deployment","title":"Deployment","text":"
  • NFS is deployed with Nix on Icarus
  • It is backed onto the PowerVault MD1200, with all of its disks passed through as single-drive RAID 0s to allow ZFS to be set up:
    • 1 mirror of 2x 500GB drives
    • 1 mirror of 2x 750GB drives
    • 1 mirror of 2x 1TB drives
    • Stripe across all the mirrors for 2TB of usable storage
    • 1 hot spare 750GB drive
  • ZFS is configured with compression on and dedup off
  • The ZFS pool is called zbackup
","tags":[]},{"location":"services/nfs/#redbrick-special-notes","title":"Redbrick Special Notes","text":"

On each machine, /storage is where NFS is mounted; /home and /webtree are symlinks into it.

There are 2 scripts used to control quotas, detailed below.

NFS is backed up to Albus via ZnapZend.

","tags":[]},{"location":"services/nfs/#zfsquota-and-zfsquotaquery","title":"zfsquota And zfsquotaquery","text":"

These are two bash scripts that run as systemd services on Icarus to manage quotas. This is achieved through getting and setting the userquota and userused properties of the ZFS dataset.

","tags":[]},{"location":"services/nfs/#zfsquota","title":"Zfsquota","text":"

ZFSQuota will read the quota field from LDAP and sync it with the userquota value on the dataset. It is not event driven - it runs on a timer every 3 hours and syncs all LDAP quotas with ZFS. It can be kicked off manually, which is described below. Users with no quota in LDAP will have no quota in /storage, and quotas removed from LDAP will persist on ZFS until cleared manually.

Changing user names has no impact on this since it is synced with uidNumber.

","tags":[]},{"location":"services/nfs/#zfsquotaquery","title":"Zfsquotaquery","text":"

ZFSQuotaQuery returns the quota and used space of a particular user. This is then used to inform rbquota, which provides the data for the MOTD used-space report. Both of these scripts are defined and deployed in the Nix config repo. It runs on port 1995/tcp.

","tags":[]},{"location":"services/nfs/#operation","title":"Operation","text":"

In general, there isn't too much to do with NFS. Below are some commands of interest for checking its status.

Bash
# On the NFS server, list the exported filesystems\nshowmount -e\n\n# Get the real space usage + fragmentation percent from ZFS\nzpool list zbackup\n\n# Check a user's quota\nzfs get userquota@m1cr0man zbackup\nzfs get userused@m1cr0man zbackup\n\n# Delete a quota from ZFS (useful if a user is deleted)\nzfs set userquota@123456=none zbackup\n\n# Get all user quota usage, and sort it by usage\nzfs userspace -o used,name zbackup | sort -h | tee used_space.txt\n\n# Resync quotas (this command will not return until it is finished)\nsystemctl start zfsquota\n\n# Check the status of zfsquotaquery\nsystemctl status zfsquotaquery\n
","tags":[]},{"location":"services/nfs/#troubleshooting","title":"Troubleshooting","text":"

In the event where clients are unable to read from NFS, your priority should be restoring the NFS server, rather than unmounting NFS from clients. This is because NFS is mounted in hard mode everywhere, meaning that it will block on IO until a request can be fulfilled.

","tags":[]},{"location":"services/nfs/#check-the-server","title":"Check The Server","text":"Bash
# Check the ZFS volume is readable and writable\nls -l /zbackup/home\ntouch /zbackup/testfile\n\n# Check that rpc.mountd, rpc.statd and rpcbind are running and listening\nss -anlp | grep rpc\n\n# Check the above services for errors (don't worry about blkmap)\nsystemctl status nfs-{server,idmapd,mountd}\njournalctl -fu nfs-server -u nfs-idmapd -u nfs-mountd\n
","tags":[]},{"location":"services/nfs/#check-the-client","title":"Check The Client","text":"Bash
# Check for connection to NFS\nss -atp | grep nfs\n\n# Check the fstab entry\ngrep storage /etc/fstab\n\n# Check if the NFS server port can be reached\ntelnet 192.168.0.150 2049\n# Entering gibberish should cause the connection to close\n\n# Remount read-only\nmount -o remount,ro /storage\n\n# Not much left you can do but remount entirely or reboot\n
","tags":[]},{"location":"services/nfs/#rolling-back-or-restoring-a-backup","title":"Rolling Back or Restoring a Backup","text":"

See znapzend

","tags":[]},{"location":"services/nomad/","title":"Nomad - distro, wizzdom","text":"

Adapted from redbrick/nomad README

","tags":["nomad","aperture"]},{"location":"services/nomad/#what-is-nomad","title":"What is Nomad?","text":"

Good question!

Nomad is a simple and flexible scheduler and orchestrator to deploy and manage containers and non-containerized applications - Nomad Docs

","tags":["nomad","aperture"]},{"location":"services/nomad/#deploying-a-nomad-job","title":"Deploying a Nomad Job","text":"

All Nomad job related configurations are stored in the nomad directory.

The terminology used here is explained here. This is required reading.

  • Install Nomad on your machine here
  • Clone this repo
Bash
git clone git@github.com:redbrick/nomad.git\n
  • Connect to the admin VPN
  • Set the NOMAD_ADDR environment variable:
Bash
export NOMAD_ADDR=http://<IP-ADDRESS-OF-HOST>:4646\n
  • Check you can connect to the nomad cluster:
Bash
nomad status\n
  • You should receive a list of all jobs back; now you are ready to start deploying!
Bash
nomad job plan path/to/job/file.hcl\n

This will plan the allocations and ensure that what is deployed is the correct version.

If you are happy with the deployment, run

Bash
nomad job run -check-index [id from last command] path/to/job/file.hcl\n

This will deploy the planned allocations, and will error if the file changed on disk between the plan and the run.

You can shorten this command to just

Bash
nomad job plan path/to/file.hcl | grep path/to/file.hcl | bash\n

This will plan and run the job file without the need for you to copy and paste the check index id. Only use this once you are comfortable with how Nomad places allocations.

","tags":["nomad","aperture"]},{"location":"services/nomad/#restart-a-nomad-job","title":"Restart a Nomad Job","text":"
  • First, stop and purge the currently-running job
Bash
nomad job stop -purge name-of-running-job\n
  • Run a garbage collection of jobs, evaluations, allocations, nodes and reconcile summaries of all registered jobs.
Bash
nomad system gc\n\nnomad system reconcile summaries\n\nnomad system gc # (yes, again)\n
  • Plan and run the job
Bash
nomad job plan path/to/job/file.hcl\n\nnomad job run -check-index [id from last command] path/to/job/file.hcl\n
","tags":["nomad","aperture"]},{"location":"services/nomad/#exec-into-container","title":"Exec into Container","text":"

At times it is necessary to exec into a docker container to complete maintenance, perform tests or change configurations. The syntax to do this on nomad is similar to docker exec with some small additions:

Bash
nomad alloc exec -i -t -task <task-name> <nomad-alloc-id> <command>\n

Where:

  • <task-name> is the name of the task you want to exec into (only needed when there is more than one task in the job)
  • <nomad-alloc-id> is the id for the currently running allocation, obtained from the web UI, nomad CLI, or nomad API
  • <command> is the command you want to run. e.g. sh, rcon-cli
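
A concrete invocation might look like this (job and task names illustrative; the alloc id lookup mirrors the HedgeDoc backup script elsewhere in these docs):
Bash
alloc_id=$(nomad job status hedgedoc | grep running | tail -n 1 | cut -d \" \" -f 1)\nnomad alloc exec -i -t -task hedgedoc $alloc_id sh\n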
","tags":["nomad","aperture"]},{"location":"services/nomad/#cluster-configuration","title":"Cluster Configuration","text":"

nomad/cluster-config contains configuration relating to the configuration of the cluster including:

  • Node Pools
  • agent config
","tags":["nomad","aperture"]},{"location":"services/nomad/#node-pools","title":"Node Pools","text":"

Node pools are a way to group nodes together into logical groups that jobs can target; they can be used to enforce where allocations are placed.

e.g. ingress-pool.hcl is a node pool that is used for ingress nodes such as the bastion-vm. Any jobs that are defined to use node_pool = \"ingress\" such as traefik.hcl and gate-proxy.hcl will only be assigned to one of the nodes in the ingress node pool (i.e. the bastion VM)

","tags":["nomad","aperture"]},{"location":"services/paste/","title":"Pastebin - wizzdom","text":"

Redbrick currently uses Privatebin as a paste utility accessible at paste.redbrick.dcu.ie and paste.rb.dcu.ie

","tags":["aperture","nomad","docker"]},{"location":"services/paste/#privatebin","title":"Privatebin","text":"

The Privatebin instance is deployed with nomad on aperture. Its configuration is available here. Privatebin doesn't support full configuration via environment variables but instead uses a conf.php file. This is passed in using nomad templates.

All sensitive variables are stored in the consul KV store.

The main points are as follows:

  • configure URL shortener (shlink)
conf.php
urlshortener = \"https://s.rb.dcu.ie/rest/v1/short-urls/shorten?apiKey={{ key \"privatebin/shlink/api\" }}&format=txt&longUrl=\"\n
  • enable file upload, set file size limit and enable compression
conf.php
fileupload = true\nsizelimit = 10485760\ncompression = \"zlib\"\n
  • Connect to PostgreSQL database
conf.php
[model]\nclass = Database\n[model_options]\ndsn = \"pgsql:host=postgres.service.consul;dbname={{ key \"privatebin/db/name\" }}\"\ntbl = \"privatebin_\"     ; table prefix\nusr = \"{{ key \"privatebin/db/user\" }}\"\npwd = \"{{ key \"privatebin/db/password\" }}\"\nopt[12] = true    ; PDO::ATTR_PERSISTENT ; use persistent connections - default\n
","tags":["aperture","nomad","docker"]},{"location":"services/servers/","title":"Servers","text":"

Redbrick provides two main servers (Azazel and Pygmalion) for its members to use for various use cases, for example running applications or user programs.

","tags":[]},{"location":"services/servers/#entrypoints","title":"Entrypoints","text":"

The main login server used in Redbrick is Azazel. You may also log in to Pygmalion if you wish at pyg.redbrick.dcu.ie

2 Factor Authentication is required to log in to Redbrick servers. This is done via an SSH key and your Redbrick username/password combination. For more information on how to create an SSH key, and configure your account for 2FA, please read below.

","tags":[]},{"location":"services/servers/#logging-in","title":"Logging in","text":"

You've set up 2FA on your account with an SSH key, right? If not, you really have to, I'm sorry.

You can log in using SSH in your command prompt or terminal application of choice with your Redbrick username and password like so:

Bash
ssh YOUR_USERNAME@redbrick.dcu.ie -i SSH_KEY_LOCATION_PATH\n\n# When prompted for the password, please input your Redbrick account password.\n# NOTE: The \"-i\" flag specifies the location of your private ssh key.\n
","tags":[]},{"location":"services/servers/#alternatives","title":"Alternatives","text":"

If you are an unbothered king/queen that simply does not mind using a web interface, let me introduce you to wetty.redbrick.dcu.ie. You do not need an SSH key here.

","tags":[]},{"location":"services/servers/#logging-in-to-other-servers","title":"Logging in to other Servers","text":"

Your home directory is synced (i.e. the same) on all public Redbrick servers. Thus the authorized_keys file will be the same on Azazel as it is on Pygmalion, meaning you can log in to pyg.redbrick.dcu.ie too, and so on.

","tags":[]},{"location":"services/servers/#setting-up-an-ssh-key","title":"Setting up an SSH Key","text":"

Generating an SSH key pair creates two long strings of characters: a public and a private key. You can place the public key on any server, and then connect to the server using an SSH client that has access to the private key.

When these keys match up, and your account password is also correct, you are granted authorisation to log in.

","tags":[]},{"location":"services/servers/#1-creating-the-key-pair","title":"1. Creating the Key Pair","text":"

On your local computer, in the command line of your choice, enter the following command:

Bash
ssh-keygen -t ed25519\n

Expected Output

Text Only
Generating public/private ed25519 key pair.\n
","tags":[]},{"location":"services/servers/#2-providing-some-extra-details","title":"2. Providing Some Extra Details","text":"

You will now be prompted with some information and input prompts:

  • The first prompt will ask where to save the keys.
Text Only
Enter file in which to save the key (e.g /home/bob/.ssh/id_ed25519):\n

You can simply press ENTER here to save them at the default location (.ssh directory in your home directory). Alternatively you can specify a custom location if you wish.

  • The second prompt will ask for a new passphrase to protect the key.
Text Only
Enter passphrase (empty for no passphrase):\n

Here you may protect this key file with a passphrase. This is optional and recommended for security.

Note

If you do not wish to add a passphrase to save you all that typing, simply press ENTER for the password and confirmation password prompts.

The newly generated public key should now be saved at /home/bob/.ssh/id_ed25519.pub. The private key is at /home/bob/.ssh/id_ed25519 (i.e. under the .ssh folder in your user home directory).

","tags":[]},{"location":"services/servers/#note-for-windows-you-heathen","title":"NOTE FOR WINDOWS (you heathen)","text":"

This key is saved under .ssh under your User directory. (i.e C:\\Users\\Bob\\.ssh\\id_ed25519)

","tags":[]},{"location":"services/servers/#3-copying-the-public-key-to-the-server","title":"3. Copying the Public Key to the Server","text":"

In this step we store our public key on the server we intend to log in to. The server will check this public key against our private key to authenticate our login.

For the purposes of this tutorial we will be using Pygmalion (pyg.redbrick.dcu.ie) as our server.

","tags":[]},{"location":"services/servers/#logging-in-to-wetty","title":"Logging in to Wetty","text":"

In order to access the server to actually place our keys in it, we need to log in via Wetty - a shell interface for Pygmalion on the web.

  • Head to wetty.redbrick.dcu.ie.

You should see this prompt:

Text Only
pygmalion.redbrick.dcu.ie login:\n

Enter your Redbrick username and press ENTER. When prompted, enter your Redbrick password. Forgot either of these?

","tags":[]},{"location":"services/servers/#adding-the-key-into-the-authorized_keys-file","title":"Adding the Key into the authorized_keys File","text":"
  • Add the key

Grab the contents of your public key. You may use the cat filepath command for this:

Bash
cat /home/bob/.ssh/id_ed25519.pub\n

On Wetty, enter the following command in the shell, with YOUR_KEY replaced with your public ssh key.

Bash
echo \"YOUR_KEY\" >> ~/.ssh/authorized_keys\n

This command will append your public key to the end of the authorized_keys file.

Note!

The speech marks surrounding YOUR_KEY are important!

","tags":[]},{"location":"services/servers/#pssst-made-a-mistake","title":"PSSST\u2026 Made a mistake?","text":"Text Only
You can manually edit the authorized_keys file in a text editor with the following command to fix any issues:\n
Bash
nano ~/.ssh/authorized_keys\n

Congratulations! If you've made it this far, you're ready to login now.

","tags":[]},{"location":"services/servers/#forgot-your-password","title":"Forgot Your Password?","text":"

Contact an admin on our Discord Server or at elected-admins@redbrick.dcu.ie

","tags":[]},{"location":"services/socs/","title":"Socs Using Redbrick Infrastructure","text":"
  • MPS/DCUfm - icecast
  • MPS/TheCollegeView - TheCollegeView - wordpress
  • DCU Style - The Look - wordpress
  • DCU Solar Racing - Solarracing.ie
  • DCU Games Society - minecraft
","tags":[]},{"location":"services/traefik/","title":"Traefik","text":"","tags":[]},{"location":"services/user-vms/","title":"User VMs","text":"

User VMs are deployed on aperture using nomad's QEMU driver.

Each VM is configured with cloud-init. Those configuration files are served by wheatley, but they can be served by any HTTP server.

","tags":["aperture","nomad","qemu"]},{"location":"services/user-vms/#setting-up-networking-on-the-host","title":"Setting up Networking on the Host","text":"

The host needs to be configured to allow the VMs to communicate with each other. This is done by creating a bridge and adding the VMs to it.

","tags":["aperture","nomad","qemu"]},{"location":"services/user-vms/#create-a-bridge","title":"Create a Bridge","text":"

To create a bridge that qemu can use to place the guest (VM) onto the same network as the host, follow the instructions listed here for iproute2, summarised below.

We need to create a bridge interface on the host.

Bash
sudo ip link add name br0 type bridge\nsudo ip link set dev br0 up\n

We'll be adding a physical interface to this bridge to allow it to communicate with the external (UDM) network.

Bash
sudo ip link set eno2 master br0\n

You'll need to assign an IP address to the bridge interface. This will be used as the default address for the host. You can do this with DHCP or by assigning a static IP address. The best way to do this is to create a DHCP static lease on the UDM for the bridge interface MAC address.
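
To find the bridge's MAC address for that static lease (interface name as above):
Bash
ip link show br0 | awk '/ether/ {print $2}'\n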

Note

TODO: Find out why connectivity seems to be lost when the bridge interface receives an address before the physical interface. If connectivity is lost, release the addresses from both the bridge and the physical interface (in that order) with sudo dhclient -v -r <iface> and then run sudo dhclient -v <iface> to assign the bridge interface an address.

","tags":["aperture","nomad","qemu"]},{"location":"services/user-vms/#add-the-vms-to-the-bridge","title":"Add the VMs to the Bridge","text":"

The configuration of the qemu network options in the job file will create a new tap interface and add it to the bridge and the VM. I advise you, for your own sanity, to never touch the network options; they will only cause you pain.

For others looking, this configuration is specific to QEMU only.

Bash
qemu-system-x86_64 ... -netdev bridge,id=hn0 -device virtio-net-pci,netdev=hn0,id=nic1\n

This will assign the VM an address on the external network. The VM will be able to communicate with the host and other VMs in the network.

You must also add allow br0 to /etc/qemu/bridge.conf to allow qemu to add the tap interfaces to the bridge. Source
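
For example (tee -a creates the file if it does not already exist):
Bash
echo 'allow br0' | sudo tee -a /etc/qemu/bridge.conf\n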

The VMs, once connected to the bridge, will be assigned an address via DHCP. You can assign a static IP address to the VMs by adding a DHCP static lease on the UDM for the VMs MAC address. You can get the address of a VM by checking the nomad alloc logs for that VM and searching for ens3.

Bash
nomad job status distro-vm | grep \"Node ID\" -A 1 | tail -n 1 | cut -d \" \" -f 1\n# <alloc-id>\nnomad alloc logs <alloc-id> | grep -E \"ens3.*global\" | cut -d \"|\" -f 4 | xargs\n# cloud init... ens3: <ip-address> global\n
","tags":["aperture","nomad","qemu"]},{"location":"services/user-vms/#configuring-the-vms","title":"Configuring the VMs","text":"

The VMs are configured with cloud-init. Their docs are pretty good, so I won't repeat them here. The files can be served by any HTTP server, and the address is placed into the job file in the QEMU options.

Nomad
...\n        args = [\n          ...\n          \"virtio-net-pci,netdev=hn0,id=nic1,mac=52:54:84:ba:49:22\", # make sure this MAC address is unique!!\n          \"-smbios\",\n          \"type=1,serial=ds=nocloud-net;s=http://136.206.16.5:8000/\",\n        ]\n...\n

Here in the args block:

  • we define that the VM will have a network device using the virtio driver, we pass it an id and a random unique MAC address
  • we tell it to use smbios type 1 and to grab its cloud-init configs from http://136.206.16.5:8000/

Note

If you're running multiple VMs on the same network make sure to set different MAC addresses for each VM, otherwise you'll have a bad time.

","tags":["aperture","nomad","qemu"]},{"location":"services/user-vms/#creating-a-new-vm","title":"Creating a New VM","text":"

To create a new VM, you'll need to create a new job file and a cloud-init configuration file. Copy any of the existing job files and modify them to suit your needs. The cloud-init configuration files can be copied and changed based on the user also. Remember to ensure the MAC addresses are unique!

","tags":["aperture","nomad","qemu"]},{"location":"services/wetty/","title":"Wetty - wizzdom","text":"

Redbrick uses Wetty as our web terminal of choice. It is accessible at wetty.redbrick.dcu.ie, wetty.rb.dcu.ie, term.redbrick.dcu.ie, anyterm.redbrick.dcu.ie and ajaxterm.redbrick.dcu.ie.

Why all the different domains? - For legacy reasons!

The configuration is located here

The configuration for Wetty is pretty straightforward:

  • SSHHOST - the host that Wetty will connect to (one of the Login boxes), defined in consul
  • SSHPORT - the port used for ssh
  • BASE - the base path for Wetty (default is /wetty)
    • This isn't very well documented but trust the process. It works!!
Nomad
SSHHOST={{ key \"wetty/ssh/host\" }}\nSSHPORT=22\nBASE=/\n
","tags":["aperture","nomad","docker"]},{"location":"services/znapzend/","title":"ZnapZend","text":"","tags":[]},{"location":"services/znapzend/#overview","title":"Overview","text":"

ZnapZend is used to back up the NFS ZFS dataset from our NFS server to Albus.

It can also be used to back up other ZFS datasets on other hosts, but at the time of writing NFS is the only thing being backed up this way.

ZnapZend runs on the client and sends backups to Albus over SSH using zfs send | zfs receive piping.

The backup strategy can be viewed in the NixOS configuration.

","tags":[]},{"location":"services/znapzend/#adding-another-backup","title":"Adding Another Backup","text":"

There is not much manual configuration to add a host to the ZnapZend backups.

  1. Create an SSH key for the root user with no passphrase on the host you want to send the backups from. Use ssh-keygen -t ed25519 (see the sketch after this list).
  2. Add this new SSH public key to the rbbackup user's authorized keys on Albus.
  3. Try SSHing to rbbackups@albus.internal to load the host key and test the passwordless authentication.
  4. Import the znapzend service config on the sending host and configure redbrick.znapzendSourceDataset and redbrick.znapzendDestDataset. Then apply the config.
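
Steps 1 and 3 above amount to something like this on the sending host (run as root; user and host names as described above):
Bash
ssh-keygen -t ed25519\n# if no password prompt appears, passwordless auth is working\nssh rbbackups@albus.internal true\n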

Note

The DestDataset must be unique across all configured backups/servers.

","tags":[]},{"location":"services/znapzend/#debugging","title":"Debugging","text":"

Znapzend runs at the top of every hour to make backups. You can watch the progress with journalctl -fu znapzend.service.

Failures are usually caused by incorrect SSH configuration, so make sure that passwordless auth using the sending host's root SSH key is working.

","tags":[]},{"location":"services/znapzend/#rolling-back-nfs","title":"Rolling Back NFS","text":"

If the NFS server is online and functional, you do not need to involve Albus to roll back changes, as all the snapshots are kept on Icarus too.

  1. Find the snapshot you want to restore with zfs list -t snapshot.
  2. Run zfs rollback $snapshotname.

That's it! These instructions obviously work for backups other than NFS too, should any ever exist.

","tags":[]},{"location":"services/znapzend/#restoring-nfs-from-a-backup","title":"Restoring NFS from a Backup","text":"

If the NFS server has died or you are creating a copy of it, here's how to pull the dataset from Albus:

  1. On Albus, find the snapshot you want to restore with zfs list -t snapshot.
  2. Open a screen/tmux, and copy the snapshot to a dataset in your target ZFS pool with:

Bash
ssh albus zfs send -vRLec $snapshotname | zfs receive $newpool/$datasetname\n

","tags":[]},{"location":"tags/","title":"Tags","text":""},{"location":"tags/#aperture","title":"aperture","text":"
  • Aperture
  • About Aperture
  • Chell
  • GlaDOS
  • Aperture Images
  • Johnson
  • Wheatley
  • Bastion VM
  • MD (HedgeDoc)
  • Nomad
  • Pastebin
  • User VMs
  • Wetty
"},{"location":"tags/#api","title":"api","text":"
  • Admin API
"},{"location":"tags/#azazel","title":"azazel","text":"
  • Azazel
"},{"location":"tags/#bind","title":"bind","text":"
  • Paphos
"},{"location":"tags/#chell","title":"chell","text":"
  • Chell
"},{"location":"tags/#daedalus","title":"daedalus","text":"
  • Icarus
  • LDAP
"},{"location":"tags/#debian","title":"debian","text":"
  • Azazel
"},{"location":"tags/#details","title":"details","text":"
  • Azazel
  • Paphos
  • Zeus
  • Aperture
  • Chell
  • GlaDOS
  • Johnson
  • Wheatley
  • Hardcase
  • Icarus
  • Motherlode
"},{"location":"tags/#dns","title":"dns","text":"
  • Paphos
  • Bind (DNS)
"},{"location":"tags/#docker","title":"docker","text":"
  • Zeus
  • MD (HedgeDoc)
  • Pastebin
  • Wetty
"},{"location":"tags/#exposed","title":"exposed","text":"
  • Services Exposed to the Internet
"},{"location":"tags/#getting-started","title":"getting-started","text":"
  • Aperture
"},{"location":"tags/#glados","title":"glados","text":"
  • GlaDOS
"},{"location":"tags/#gpg","title":"gpg","text":"
  • Open Governance Tagging
"},{"location":"tags/#hardcase","title":"hardcase","text":"
  • Hardcase
"},{"location":"tags/#hardware","title":"hardware","text":"
  • Azazel
  • Paphos
  • Pygmalion
  • Zeus
  • About Aperture
  • Chell
  • GlaDOS
  • Aperture Images
  • Johnson
  • Wheatley
  • Hardcase
  • Icarus
  • Motherlode
"},{"location":"tags/#icarus","title":"icarus","text":"
  • Icarus
  • LDAP
"},{"location":"tags/#images","title":"images","text":"
  • Aperture Images
"},{"location":"tags/#ingress","title":"ingress","text":"
  • Bastion VM
"},{"location":"tags/#install","title":"install","text":"
  • Aperture Images
"},{"location":"tags/#johnson","title":"johnson","text":"
  • Johnson
"},{"location":"tags/#ldap","title":"ldap","text":"
  • Admin API
  • LDAP
"},{"location":"tags/#libvirt","title":"libvirt","text":"
  • Motherlode
"},{"location":"tags/#login-box","title":"login-box","text":"
  • Azazel
  • Pygmalion
"},{"location":"tags/#motherlode","title":"motherlode","text":"
  • Motherlode
"},{"location":"tags/#nixos","title":"nixos","text":"
  • Hardcase
  • Icarus
  • Motherlode
"},{"location":"tags/#nomad","title":"nomad","text":"
  • Bastion VM
  • MD (HedgeDoc)
  • Nomad
  • Pastebin
  • User VMs
  • Wetty
"},{"location":"tags/#open-gov","title":"open-gov","text":"
  • Open Governance Tagging
"},{"location":"tags/#paphos","title":"paphos","text":"
  • Paphos
"},{"location":"tags/#powercut","title":"powercut","text":"
  • Post-powercut Todo List
"},{"location":"tags/#pygmalion","title":"pygmalion","text":"
  • Pygmalion
"},{"location":"tags/#qemu","title":"qemu","text":"
  • Motherlode
  • User VMs
"},{"location":"tags/#services","title":"services","text":"
  • Admin API
  • Bastion VM
  • Bind (DNS)
  • Services Exposed to the Internet
"},{"location":"tags/#tagging","title":"tagging","text":"
  • Open Governance Tagging
"},{"location":"tags/#todo","title":"todo","text":"
  • Post-powercut Todo List
"},{"location":"tags/#ubuntu","title":"ubuntu","text":"
  • Paphos
  • Pygmalion
  • Zeus
"},{"location":"tags/#vm","title":"vm","text":"
  • Bastion VM
"},{"location":"tags/#wheatley","title":"wheatley","text":"
  • Wheatley
"},{"location":"tags/#zeus","title":"zeus","text":"
  • Zeus
"}]} \ No newline at end of file diff --git a/services/api/index.html b/services/api/index.html new file mode 100644 index 00000000..42a50dc4 --- /dev/null +++ b/services/api/index.html @@ -0,0 +1,2383 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Admin API - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Redbrick Administrative Web API

+

The source code for the API can be found here.

+

The Redbrick web API serves as an easy interface to carry out administrator tasks (mainly LDAP related), and for use in automation. This saves time instead of accessing machines, and formulating and executing manual LDAP queries or scripts.

+

The server code for the API is hosted on aperture in a docker container deployed with nomad, the job file for which is here. It is written in Python with FastAPI. This container is then served to the public using traefik.

+

Nomad Job File

+

The nomad job for Redbrick's API is similar to most other web servers for the most part. As always, all secrets are stored in consul. Some things to watch out for are:

+
    +
  • The docker image on ghcr.io is private and therefore requires credentials to access.
  • +
+
Nomad
auth {
+    username = "${DOCKER_USER}"
+    password = "${DOCKER_PASS}"
+}
+
+
Nomad
template {
+  data        = <<EOH
+DOCKER_USER={{ key "api/ghcr/username" }}
+DOCKER_PASS={{ key "api/ghcr/password" }}
+...
+EOH
+
+
    +
  • The docker container must access /home and /storage on icarus to configure users' home directory and webtree. This is mounted onto the aperture boxes at /oldstorage and is mounted to the containers like this:
  • +
+
Nomad
volumes = [
+          "/oldstorage:/storage",
+          "/oldstorage/home:/home",
+]
+
+
    +
  • The container requires the LDAP secret at /etc/ldap.secret to auth with LDAP. This is stored in consul, placed in a template and mounted to the container like this:
  • +
+
Nomad
template {
+    destination = "local/ldap.secret"
+    data = "{{ key \"api/ldap/secret\" }}" # this is necessary as the secret has no EOF
+    }
+
+
    +
  • The container is quite RAM intensive, regularly using 700-800MB. The job has been configured to allocate 1GB RAM to the container so it does not OOM. The default cpu allocation of 300 is fine.
  • +
+
Nomad
resources {
+    cpu = 300
+    memory = 1024
+    }
+
+

Reference

+

For the most up to date, rich API reference please visit https://api.redbrick.dcu.ie/docs

+

All requests are validated with Basic Auth for access. See example.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MethodRouteURL ParametersBody
GET/users/usernameusername - Redbrick usernameN/A
PUT/users/usernameusername - Redbrick usernameldap_key
POST/users/registerN/Aldap_value
+

Examples

+
    +
  • GET a user's LDAP data
  • +
+
Python
import requests
+
+url = "https://api.redbrick.dcu.ie/users/USERNAME_HERE"
+
+headers = {
+  'Authorization': 'Basic <ENCODED_USERANDPASS_HERE>'
+}
+
+response = requests.request("GET", url, headers=headers)
+
+print(response.text)
+
+
    +
  • PUT a user's LDAP data to change their loginShell to /usr/local/shells/zsh
  • +
+
Python
import requests
+import json
+
+url = "https://api.redbrick.dcu.ie/users/USERNAME_HERE"
+payload = json.dumps({
+  "ldap_key": "loginShell",
+  "ldap_value": "/usr/local/shells/zsh"
+})
+headers = {
+  'Authorization': 'Basic <ENCODED_USERANDPASS_HERE>',
+  'Content-Type': 'application/json'
+}
+
+response = requests.request("GET", url, headers=headers, data=payload)
+
+print(response.text)
+
+

Important Notes and Caveats

+

As the FastAPI server for the API is hosted inside of a Docker container, there are limitations to the commands we can execute that affect the "outside" world.

+

This is especially important with commands that rely on LDAP.

+

    For example, this applies inside the ldap-register.sh script used by the /register endpoint.

+
    +
  • +

    Commands like chown which require a user group or user to be passed to them will not work because they cannot access these users/groups in the container.

    +
  • +
  • +

    This is prevalent in our implementation of the API that creates and modifies users' webtree directories.

    +
  • +
+

How do we fix this?

+

Instead of relying on user/group names for the chown command, it is advisable to use their unique numeric IDs instead.

+
Bash
# For example, the following commands are equivalent.
+chown USERNAME:member /storage/webtree/U/USERNAME
+
+chown 13371337:103 /storage/webtree/U/USERNAME
+# Where 13371337 is userid and 103 is the id for the 'member' group.
+
+
+

Note that USERNAME can be used to refer to the user's web directory here since it is the name of the directory and doesn't refer to the user object.
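As a rough sketch of how the numeric IDs could be looked up at runtime (this is not taken from the actual ldap-register.sh script; it assumes ldapsearch is available where the script runs, that ldap.internal is reachable from there, and USERNAME is a placeholder):

Bash
# Resolve a user's uidNumber and gidNumber from LDAP, then chown by number
uid_number=$(ldapsearch -h ldap.internal -p 389 -xLLL -b o=redbrick uid=USERNAME uidNumber | awk '/^uidNumber:/ {print $2}')
gid_number=$(ldapsearch -h ldap.internal -p 389 -xLLL -b o=redbrick uid=USERNAME gidNumber | awk '/^gidNumber:/ {print $2}')

chown "${uid_number}:${gid_number}" /storage/webtree/U/USERNAME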

+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/bastion-vm/index.html b/services/bastion-vm/index.html new file mode 100644 index 00000000..e03baa61 --- /dev/null +++ b/services/bastion-vm/index.html @@ -0,0 +1,2242 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Bastion VM - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Bastion VM

+

This VM is an ephemeral machine that can be placed on any nomad client that has the qemu driver enabled.

+

It acts as the point of ingress for Aperture, with ISS and our mordor allowing traffic to reach its IP address externally. The VM is configured as a Nomad client itself, in the ingress node pool, to ensure that only ingress-type allocations (like traefik) are placed there. Those services can proxy requests from the Bastion VM to internal services using consul's service DNS resolution, its service mesh, or plain IP and port.

+

Ingress Topology with Bastion VM

+

cloud-init is given a static address during the initialisation phase to configure the interface. This ensures that, even if the VM is re-planned onto another host, it will still be able to accept traffic.

+

The base image that the VM uses is a Debian 12 qcow file. After all configuration was done, the size of the image is ~3.2GB. The image can be used to create replicas of the ingress on other external IP addresses, creating more availability if needed.

+

Steps to Deploy

+

You'll need to ensure the hosts have a bridge device configured so that the networking aspect of the VM can function. See the redbrick/nomad repo for more information about the steps needed for that.

+

You'll need a webserver to serve the cloud-init configs. There may be another solution to this in the near future, but for now, wheatley:/home/mojito/tmp/serve contains the configurations.
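As an illustrative sketch (not necessarily the actual setup), any static file server works; for example, from the directory containing the configs:

Bash
# Serve the cloud-init config directory over HTTP on port 8000
cd /home/mojito/tmp/serve   # path mentioned above, on wheatley
python3 -m http.server 8000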

+

Plan the Nomad job and wait for the allocation to be created. If you used the correct image (for example a backup of the qcow file) the virtual machine should be configured and should connect as normal to the Consul and Nomad clusters and become eligible for allocations. If you started from scratch, then use the ansible/redbrick-ansible.yml playbook in the redbrick/nomad repo and ensure that the hosts file is up to date.
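A minimal sketch of that step (the job file path below is a placeholder; use whatever the bastion VM job is called in the redbrick/nomad repo):

Bash
nomad job plan path/to/bastion-vm.hcl
nomad job run -check-index <index-from-plan> path/to/bastion-vm.hcl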

+

For security's sake, there is no root login and there are no user accounts on the bastion VM. This is an attempt to make the node more secure. If you need to make changes, you should change the base image and apply that. The fewer vulnerabilities that are discovered on the bastion VM, the happier we can keep ISS and the safer Redbrick will be.

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/bind/index.html b/services/bind/index.html new file mode 100644 index 00000000..e286054a --- /dev/null +++ b/services/bind/index.html @@ -0,0 +1,2285 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Bind (DNS) - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Bind9 - distro, ylmcc, wizzdom

+

bind9 is our DNS provider. Currently it runs on paphos, but this will change in the near future.

+

Configuration

+

The config files for bind are located in /etc/bind/master/. The most important files in this directory are:

+
    +
  • db.Redbrick.dcu.ie
  • +
  • db.Rb.dcu.ie
  • +
  • various other files for other socs and members
  • +
+
+

Warning

+

You must never update these files without following the steps below first!

+
+

Updating DNS

+

To update DNS:

+
    +
  1. Change directory to /etc/bind/master
  2. +
+
Bash
cd /etc/bind/master
+
+
    +
  2. Back up the db.Redbrick.dcu.ie file, usually to db.Redbrick.dcu.ie.bak
  2. +
+
Bash
cp db.Redbrick.dcu.ie{,.bak}
+
+
    +
  3. Stop changes to the file affecting DNS while you edit it
  2. +
+
Bash
rndc freeze redbrick.dcu.ie
+
+
    +
  4. Edit db.Redbrick.dcu.ie
  5. Before changing any DNS entry in the file, you must update the serial number on line 4. You can increment it by one if
    you want, or follow the format YYYYMMDDrev, where rev is the revision. For example:
  4. +
+
db.Redbrick.dcu.ie
2024033106 ; serial
+
+
    +
  6. Once you are happy with your file, you can check it with:
  2. +
+
Bash
named-checkzone redbrick.dcu.ie db.Redbrick.dcu.ie
+
+
    +
  7. If this returns no errors, you are free to thaw the DNS freeze:
  2. +
+
Bash
rndc thaw redbrick.dcu.ie
+
+
    +
  8. Check the status of bind9:
  2. +
+
Bash
service bind9 status
+
+
    +
  9. You can access more logs from bind9 by checking /var/log/named/default.log:
  2. +
+
Bash
tail -n 20 /var/log/named/default.log
+
+
+

Note

+

Once you have verified that everything is working properly, add your changes and commit them to git.
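For example, assuming the bind config directory is the git working tree (a sketch; adjust paths and the commit message as needed):

Bash
cd /etc/bind
git add master/db.Redbrick.dcu.ie
git commit -m "dns: update redbrick.dcu.ie zone (serial 2024033106)"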

+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/codimd/index.html b/services/codimd/index.html new file mode 100644 index 00000000..6728b8c2 --- /dev/null +++ b/services/codimd/index.html @@ -0,0 +1,15 @@ + + + + + + Redirecting... + + + + + + +Redirecting... + + diff --git a/services/consul/index.html b/services/consul/index.html new file mode 100644 index 00000000..735b5697 --- /dev/null +++ b/services/consul/index.html @@ -0,0 +1,2158 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Consul - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Consul

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/exposed/index.html b/services/exposed/index.html new file mode 100644 index 00000000..2c65e1b7 --- /dev/null +++ b/services/exposed/index.html @@ -0,0 +1,2342 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Services Exposed to the Internet - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Services Exposed to the Internet - wizzdom

+

Firstly, it's important to mention that Redbrick is currently split into 2 parts:

+
    +
  • Redbrick 2.0 a.k.a. "old redbrick" (on 136.206.15.0/24)
  • +
  • New Redbrick which includes Aperture (on 136.206.16.0/24)
  • +
+

+

Old Redbrick

+
    +
  • motherlode - 136.206.15.250 +
  • +
  • hardcase - 136.206.15.3
      +
    • OS: NixOS 22.05
    • +
    • Services:
        +
      • apache httpd:
          +
        • websites from the webtree (including, but not limited to):
            +
          • all users' websites <user>.redbrick.dcu.ie
          • +
          • other websites are mentioned in the nix-configs repo
          • +
          +
        • +
        • legacy websites (pretty much anything that isn't dockerized)
        • +
        • thecollegeview.ie
        • +
        • thelookonline.dcu.ie
        • +
        +
      • +
      • email (postfix and dovecot)
      • +
      • mailing lists (mailman)
      • +
      • *.redbrick.dcu.ie also points here
      • +
      +
    • +
    +
  • +
  • paphos - 136.206.15.53
      +
    • OS: Ubuntu 14.04 LTS
    • +
    • Services: +
    • +
    +
  • +
+

New Redbrick

+
    +
  • azazel - 136.206.16.24
      +
    • OS: Debian 12 bookworm
    • +
    • Services:
        +
      • primary ssh login box for users (see Logging in)
      • +
      • jump-box for admins
      • +
      +
    • +
    +
  • +
  • pygmalion - 136.206.16.25
      +
    • OS: Debian 12 bookworm
    • +
    • Services:
        +
      • secondary ssh login box for users (see Logging in)
      • +
      • jump-box for admins
      • +
      +
    • +
    +
  • +
+

Aperture

+

In aperture, things are done a little differently than on the other network. Instead of having a single host per service, aperture is configured to allow services to be allocated dynamically across all 3 servers using nomad, consul and traefik.

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/gitea/index.html b/services/gitea/index.html new file mode 100644 index 00000000..b45ef05b --- /dev/null +++ b/services/gitea/index.html @@ -0,0 +1,2238 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Gitea - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Gitea

+

Redbrick uses Gitea as an open source git host.

+ +

Deployment

+

Gitea and its database are deployed to Hardcase, which runs NixOS.

+
    +
  • The actual repositories are stored in /zroot/git and most other data is stored in /var/lib/gitea
  • +
  • The SECRET_KEY and INTERNAL_TOKEN_URI are stored in /var/secrets. They are not automatically created and must be copied when setting up new hosts. Permissions on the gitea_token.secret must be 740 and it must be owned by git:gitea (see the example after this list).
  • +
  • Make sure that the gitea_token.secret does NOT have a newline character in it.
  • +
+
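A minimal sketch of applying those permissions when copying the secrets to a new host (file names as described above):

Bash
# Fix ownership and permissions on the token secret
chown git:gitea /var/secrets/gitea_token.secret
chmod 740 /var/secrets/gitea_token.secret

# Check the last byte of the file - a trailing newline will show up as \n here
tail -c 1 /var/secrets/gitea_token.secret | od -c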

Other Notes

+

The Giteadmin credentials are in the passwordsafe.

+

Operation

+

Gitea itself is very well documented. Here are a couple of special commands for when deploying/migrating Gitea to a different host.

+
Bash
# Regenerate hooks which fixes push errors
+/path/to/gitea admin regenerate hooks
+
+# If you didn't copy the authorized_keys folder then regen that too
+/path/to/gitea admin regenerate keys
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/icecast/index.html b/services/icecast/index.html new file mode 100644 index 00000000..a06fc104 --- /dev/null +++ b/services/icecast/index.html @@ -0,0 +1,2294 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Icecast - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Icecast

+

Icecast is a streaming server that we currently host on aperture.

+

We stream DCUfm's broadcasts to their apps via a stream presented at dcufm.redbrick.dcu.ie.

+

Procedure

+

The configuration file for icecast is located in the nomad config repo.

+

It should just be a case of running nomad job plan clubs-socs/dcufm.hcl and then nomad job run with the returned check index to deploy the job.
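Concretely, that looks something like this (the check index value comes from the plan output):

Bash
nomad job plan clubs-socs/dcufm.hcl
nomad job run -check-index <index-from-plan> clubs-socs/dcufm.hcl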

+
+

Note

+

The job may bind to either the internal or external address. Ensure that if you make a change to the config, you inform DCUfm that they may need to switch which server they use.

+
+

Streaming to Icecast

+

DCUfm use butt on a desktop in their studio to stream to Icecast.

+

The desktop must be connected to the VPN to ensure the stream stays up and traefik doesn't reset the connection every 10 seconds. The current icecast configuration for the server is 10.10.0.5:2333 or 136.206.16.5:2333 (see the note above).

+

Read more about it in this issue.

+

A shortcut to the VPN is available on the desktop (a shortcut to the binary, changed to include --connect profile.ovpn).

+

See here.

+

DCUfm Cheat Sheet

+

This is a cheat sheet for DCUfm to help them stream to icecast.

+

Connecting to the VPN

+

You'll need to connect to the Redbrick VPN to stream to icecast. You can do this by double clicking the shortcut on the desktop.

+

You'll then need to go to bottom right corner of the screen and right click this icon:

+

Disconnected OpenVPN icon

+

A popup will appear; click Connect. This will connect you to the VPN. It may take a second, but a window will pop up with a lot of text. The VPN will connect and then it'll close.

+

Connect to OpenVPN

+

You should end up with an icon like this:

+

Connected OpenVPN icon

+

You're now connected to the VPN.

+

Connecting to Icecast

+

You'll need to connect to icecast to stream to it. BUTT is the software we use to stream to icecast. You'll also find this on the desktop. Once it's open (and you're connected to the VPN), press the small "play" button in the top left corner. This will start your stream to the server.

+

The username and password should already be configured in the software. If not, ask a redbrick sysadmin for the login details.

+
+

Warning!

+

If you find that butt is not connecting, then you may need to switch which server you're connecting to. To do this, go to settings, and then the "Main" tab. In the dropdown, select either DCUfm 1 or DCUfm 2 (try both, one will definitely work).

+
+

Saving Your Stream

+

Your stream will be saved automatically onto the desktop into a folder called Recordings YYYY (where YYYY is the current year), with the date and time of the recording, in .mp3 format. Take this file with you (via a USB or similar) if you want to keep it for later; it will not be kept on the desktop for long!

+

Further Information

+

If you have any questions, please ask a redbrick sysadmin.

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/index.html b/services/index.html new file mode 100644 index 00000000..67c79ec6 --- /dev/null +++ b/services/index.html @@ -0,0 +1,2187 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Services - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Preface

+

Here you will find a list of all the services Redbrick runs, along with some configs and some important information surrounding them.

+ +

Adding More Services

+

In order to add a new service, you will need to edit the docs repository.

+

Adding a new service is as easy as creating a new file in docs/services/ with an appropriate name, and the page will be automatically added to the navigation pane.

+

Try to keep file names short and concise, limited to one word if possible and avoid using spaces.

+

The style guide for a service file should be as follows:

+
Markdown
---
+title: ServiceName
+author:
+  - username
+tags:
+  - relevant
+  - tags
+
+---
+
+# ServiceName - `username`
+
+Short description on how the service works and where it is running
+
+## Configuration
+
+Add some possible useful configs here, like a docker-compose file,
+certain command you may have had to run, or something that is not very obvious.
+Look at other services for hints on this.
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/irc/index.html b/services/irc/index.html new file mode 100644 index 00000000..84489f32 --- /dev/null +++ b/services/irc/index.html @@ -0,0 +1,2282 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + IRC - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

IRC

+

Redbrick InspIRCd

+

In 2016/2017 we began work to move to InspIRCd. This was due to the complications in ircd-hybrid and how old it was. These complications stopped new netsocs joining us, so we all agreed to move IRC. 4 years later, after multiple attempts, we still had not migrated, until TCD decided to shut down their server, breaking the network.

+

We run Inspircd v3 on Metharme. InspIRCd's docs can be found here for configuration specifics.

+

IRC is available at irc.redbrick.dcu.ie on port 6697. SSL is required for connection; we do not support non-SSL. When connecting from a Redbrick server, a user will be automatically logged in. If connecting from an external server, a user must pass their password on login.
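As a quick way to verify the SSL listener is up (a sketch using standard openssl tooling, not something from the original docs):

Bash
# Should complete a TLS handshake and print the certificate chain
openssl s_client -connect irc.redbrick.dcu.ie:6697 </dev/null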

+

For the purpose of external peering of other servers the port 7001 is exposed as well. Similarly to clients we only support SSL on this port.

+

For docs on connecting and using an IRC client please refer to the wiki.

+

Installation

+

InspIRCd is installed with Nix. There is no Nix package for InspIRCd so we compile a specific git tag from source. See Nix package for details on how it is compiled. Given we only support SSL and require LDAP, we need to enable both at compile time.

+

Configuration

+

InspIRCd's configuration is in Nix here. This config is converted to XML on disk.

+

Important Configuration

+
    +
  • oper is a list of admin users on the irc server. Their OPER password will need to be manually hashed with hmac-sha256, and placed in a secret on the server to be read in by inspircd.
  • +
  • ldapwhitelist is a list of CIDR addresses that do not require authentication. The list consists of Redbrick public and private addresses as well as oldsoc.
  • +
  • link is a list of all servers we peer with including the anope services server that runs on the same box.
  • +
+

oldsoc.net

+

oldsoc.net is a server run by old TCD netsocers. All the users on it are the remaining TCD associates following the shutdown of TCD IRCd. This server is maintained by its own users and has explicit permission to join IRC without LDAP auth.

+

Anope

+

Redbrick runs Anope services for the entire network. As with InspIRCd, we compile from source. Refer to Anope's GitHub docs for configuration specifics.

+

Our current Anope is configured with standard mods of chanserv, nickserv and operserv. All config is in here.

+

Anope stores all info in a custom db file on disk.

+

Discord Bridge - butlerx

+

We run a bridge between the Redbrick Discord and irc. The configuration for this is here.

+

The bridge adds all users from Discord with the suffix _d2, and all IRC users appear as themselves but tagged as a bot in Discord. Not all Discord channels are on IRC; the config above contains a mapping of IRC channels to Discord channel IDs. This needs to be manually updated to add more channels.

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/ldap/index.html b/services/ldap/index.html new file mode 100644 index 00000000..3f579d3d --- /dev/null +++ b/services/ldap/index.html @@ -0,0 +1,2488 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + LDAP - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

LDAP - m1cr0man

+

LDAP is our directory service. It stores usernames, passwords, UIDs, quotas, and other user specific info.

+

LDAP's structure is different to most other database systems. If you are not familiar with it, I recommend investing some time into looking at how schemas and distinguished names work.

+

Deployment

+
    +
  • OpenLDAP is deployed with Nix to Daedalus and Icarus
  • +
  • Daedalus is the master, Icarus is slaved to it and can be used as a read only failover
  • +
  • ldap.internal and ldap2.internal are slaved to Daedalus + Icarus respectively
  • +
  • Both servers store their data in /var/db/openldap
  • +
  • The ldap.secret, which should ALWAYS have permissions 400, and owned by the openldap user, is stored in /var/secrets. It is not automatically created and must be copied when setting up new hosts
  • +
  • rb-ldap and useradm are wrappers around LDAP that are custom built
  • +
+

Redbrick Special Notes

+
    +
  • The root user password is in the passwordsafe
  • +
  • The OID for most of the schema is DCU's
  • +
  • The configs that exist for NixOS were mostly ported from our last
    + LDAP server (paphos) to maintain compatibility
  • +
  • At the time of writing, LDAP is not configured with TLS
  • +
  • There are 2 scripts to manage quotas on /storage that run on the server serving NFS (zfsquota and zfsquotaquery). They are covered under the NFS documentation.
  • +
  • There's a user in ldap called testing, for testing. The password is in pwsafe.
  • +
+

Operation

+

The ldap* suite of commands can be used to manage LDAP. Their man pages are very well documented, but we've provided most common operations below.

+

Note that ldap.secret contains a crypted (hashed) value and is not equal to the actual password you need to run ldap commands.
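A small sketch of preparing the plain-text password file used with -y in the recipes below (the entire file content is used as the password, so write it without a trailing newline; the path is arbitrary):

Bash
# Create a root password file for use with -y
printf '%s' 'THE_ACTUAL_ROOT_PASSWORD' > /root/ldap-passwd.txt
chmod 400 /root/ldap-passwd.txt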

+

Ldapsearch Recipes

+

ldapsearch can be used with and without authenticating as root. Without root, some fields (such as the password hash, altmail) will be hidden.

+
Bash
# Dump the entire LDAP database in LDIF form, which can be used as a form of backup
+ldapsearch -b o=redbrick -xLLL -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt
+
+# Find a user by name, and print their altmail
+ldapsearch -b o=redbrick -xLLL -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt uid=m1cr0man altmail
+
+# Find quotas for all users edited by m1cr0man
+ldapsearch -b o=redbrick -xLLL updatedby=m1cr0man quota
+
+# Find all member's usernames
+ldapsearch -b o=redbrick -xLLL objectClass=member uid
+
+# Find all expired users. Notice here that you can query by hidden fields, but you can't read them
+ldapsearch -b o=redbrick -xLLL '(yearsPaid<=0)' uid
+
+

Ldapmodify Recipes

+

You can instead pass a file with -f when necessary.

+

To test a command add -n for no-op mode.

+

Setting updatedby and updated is included in each command as good practice.

+
Bash
# Add quota info to a user
+ldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt << EOF
+dn: uid=testing,ou=accounts,o=redbrick
+changetype: modify
+add: quota
+quota: 3G
+-
+replace: updatedby
+updatedby: $USER
+-
+replace: updated
+updated: $(date +'%F %X')
+EOF
+
+# Change a user's shell
+ldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt << EOF
+dn: uid=testing,ou=accounts,o=redbrick
+changetype: modify
+replace: loginShell
+loginShell: /usr/local/shells/disusered
+-
+replace: updatedby
+updatedby: $USER
+-
+replace: updated
+updated: $(date +'%F %X')
+EOF
+
+# Update yearsPaid
+ldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt << EOF
+dn: uid=testing,ou=accounts,o=redbrick
+changetype: modify
+replace: yearsPaid
+yearsPaid: 1
+-
+replace: updatedby
+updatedby: $USER
+-
+replace: updated
+updated: $(date +'%F %X')
+EOF
+
+

Ldapadd Recipes

+

Occasionally you'll need to add people or things to ldap manually, such as a user you're recreating from backups, or a reserved system name such as a new machine. This is where ldapadd comes in.

+
Bash
# Create a file to read the new entry from
+cat > add.ldif << EOF
+dn: uid=redbrick,ou=reserved,o=redbrick
+uid: redbrick
+description: DNS entry
+objectClass: reserved
+objectClass: top
+EOF
+
+# Import the ldif
+ldapadd -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt -f add.ldif
+
+# Note if you are importing a full ldif onto a new server, use slapadd instead
+# Ensure slapd is not running first
+slapadd -v -l backup.ldif
+
+

Other Recipes

+

On a yearly basis, the yearsPaid field must be decremented for every user, and last year's newbies need to be marked as no longer newbies.

+

Remember to take off -n when you are ready to rock.

+

Adding the updated and updatedby fields from above to these queries would be a good idea.

+
Bash
# Decrement yearsPaid
+# WARNING NOT IDEMPOTENT, RUN ONCE
+ldapsearch -b o=redbrick -xLLL -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt objectClass=member yearsPaid |\
+tee yearsPaid-$(date +'%F').backup.ldif |\
+awk '/yearsPaid/ { print "changetype: modify\nreplace: yearsPaid\nyearsPaid: " $2 - 1 } ! /yearsPaid/ {print $0}' |\
+ldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt -n
+
+# De-newbie last year's users
+ldapsearch -b o=redbrick -xLLL -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt newbie=TRUE dn |\
+tee newbie-$(date +'%F').backup.ldif |\
+awk '/^dn/ {print $0"\nchangetype: modify\nreplace: newbie\nnewbie: FALSE\n"}' |\
+ldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt -n
+
+# Set quotas of users without quotas
+ldapsearch -b o=redbrick -xLLL '(&(objectClass=posixAccount)(!(quota=*)))' dn |\
+awk '/^dn/ {print $0"\nchangetype: modify\nadd: quota\nquota: 2G\n"}' |\
+ldapmodify -x -D cn=root,ou=ldap,o=redbrick -y /path/to/passwd.txt -n
+
+

Troubleshooting

+

First off, it's worth calling out that if you are coming here to find help with a client side issue, chances are the DNS rule applies:

+
+

It's probably not LDAP

+
+

With that out of the way, here are some things to check, in order.

+

Check Reachability of LDAP

+

Run this from the master and also from the problem client. It should return m1cr0man's details. If you get an "invalid credentials" or "object not found" error, check that the LDAP auth config hasn't changed. If you get a connection error, then restart the service.

+
Bash
ldapsearch -h ldap.internal -p 389 -xLLL -b o=redbrick uid=m1cr0man
+
+

Verify LDAP Can Be Written to

+

Get the password from the passwordsafe. Run this from the master.

+
Bash
ldapmodify -D cn=root,ou=ldap,o=redbrick -x -y filewithpwd.txt << EOF
+dn: uid=m1cr0man,ou=accounts,o=redbrick
+changetype: modify
+replace: quota
+quota: 3G
+EOF
+
+

Run the command from the first troubleshooting step to verify the value changed.

+

If it fails with an auth issue, triple check your password file (it should contain the plain text password). If it fails with a non-auth issue, then check the service logs.

+

Enable Debug Logging

+

OpenLDAP produces a nice set of logs when the loglevel is not set.

+

Remove loglevel from extraConfig in the Nix config and switch, then run this command to tail the logs:

+
Bash
journalctl -fu openldap
+
+

Re-syncing Secondary LDAP Server(s)

+

In the event a secondary server becomes out of sync with the master, it can be synced by stopping the server, deleting its database files, then restarting the server. Do this after ensuring that config.redbrick.ldapSlaveTo is set correctly.
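A minimal sketch of that procedure on the secondary (service name and data directory as described above; double-check which host you are on before deleting anything):

Bash
systemctl stop openldap
rm -rf /var/db/openldap/*
systemctl start openldap

# Watch it re-sync from the master
journalctl -fu openldap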

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/md/index.html b/services/md/index.html new file mode 100644 index 00000000..b3f54b68 --- /dev/null +++ b/services/md/index.html @@ -0,0 +1,2281 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + MD (HedgeDoc) - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

HedgeDoc - wizzdom

+

HedgeDoc is deployed with nomad on aperture as a docker container. It is accessible through md.redbrick.dcu.ie.

+

HedgeDoc auths against LDAP and its configuration is available here

+

All sensitive variables are stored in the consul KV store.

+

The important points are as follows:

+
    +
  • connecting to the database:
  • +
+
Nomad
CMD_DB_URL = "postgres://{{ key "hedgedoc/db/user" }}:{{ key "hedgedoc/db/password" }}@{{ env "NOMAD_ADDR_db" }}/{{ key "hedgedoc/db/name" }}"
+
+
    +
  • disabling anonymous users and email signup:
  • +
+
Nomad
CMD_ALLOW_EMAIL_REGISTER = "false"
+CMD_ALLOW_ANONYMOUS      = "false"
+CMD_EMAIL                = "false"
+
+
    +
  • LDAP configuration:
  • +
+
Nomad
CMD_LDAP_URL             = "{{ key "hedgedoc/ldap/url" }}"
+CMD_LDAP_SEARCHBASE      = "ou=accounts,o=redbrick"
+CMD_LDAP_SEARCHFILTER    = "{{`(uid={{username}})`}}"
+CMD_LDAP_PROVIDERNAME    = "Redbrick"
+CMD_LDAP_USERIDFIELD     = "uidNumber"
+CMD_LDAP_USERNAMEFIELD   = "uid"
+
+

See the HedgeDoc docs for more info on configuration.

+

Backups

+

The HedgeDoc database is backed up periodically by a nomad job, the configuration for which is here.

+

The bulk of this job is this script which:

+
    +
  • grabs the alloc_id of the currently running HedgeDoc allocation from nomad
  • +
  • execs into the container running pg_dumpall dumping the database into a file with the current date and time
  • +
  • if the backup is unsuccessful the script notifies the admins on discord via a webhook.
  • +
+
Bash
#!/bin/bash
+
+file=/storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc-$(date +%Y-%m-%d_%H-%M-%S).sql
+
+mkdir -p /storage/backups/nomad/postgres/hedgedoc
+
+alloc_id=$(nomad job status hedgedoc | grep running | tail -n 1 | cut -d " " -f 1)
+
+job_name=$(echo ${NOMAD_JOB_NAME} | cut -d "/" -f 1)
+
+nomad alloc exec -task hedgedoc-db $alloc_id pg_dumpall -U {{ key "hedgedoc/db/user" }} > "${file}"
+
+find /storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc* -ctime +3 -exec rm {} \; || true
+
+if [ -s "$file" ]; then # check if file exists and is not empty
+  echo "Backup successful"
+  exit 0
+else
+  rm $file
+  curl -H "Content-Type: application/json" -d \
+  '{"content": "<@&585512338728419341> `PostgreSQL` backup for **'"${job_name}"'** has just **FAILED**\nFile name: `'"$file"'`\nDate: `'"$(TZ=Europe/Dublin date)"'`\nTurn off this script with `nomad job stop '"${job_name}"'` \n\n## Remember to restart this backup job when fixed!!!"}' \
+  {{ key "postgres/webhook/discord" }}
+fi
+
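Restoring from one of these dumps is roughly the reverse; a sketch, assuming the hedgedoc-db task image includes psql and using placeholder names for the dump file and database user:

Bash
alloc_id=$(nomad job status hedgedoc | grep running | tail -n 1 | cut -d " " -f 1)

# Pipe the dump back into the database container (pg_dumpall output is replayed with psql)
nomad alloc exec -i -task hedgedoc-db "$alloc_id" psql -U DB_USER_HERE -d postgres \
  < /storage/backups/nomad/postgres/hedgedoc/postgresql-hedgedoc-DATE_HERE.sql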
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/nfs/index.html b/services/nfs/index.html new file mode 100644 index 00000000..77335e08 --- /dev/null +++ b/services/nfs/index.html @@ -0,0 +1,2373 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + NFS - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

NFS / Network File Storage

+

NFS is used to serve the notorious /storage directory on Icarus to all of Redbrick's machines, which in turn serves /home, /webtree and some other critical folders.

+

Deployment

+
    +
  • NFS is deployed with Nix on Icarus
  • +
  • It is backed onto the PowerVault MD1200 with all its disks passed through as single-drive RAID 0s to allow for setup of ZFS:
      +
    • 1 mirror of 2x 500GB drives
    • +
    • 1 mirror of 2x 750GB drives
    • +
    • 1 mirror of 2x 1TB drives
    • +
    • Stripe across all the mirrors for 2TB of usable storage
    • +
    • 1 hot spare 750GB drive
    • +
    +
  • +
  • ZFS is configured with compression on and dedup off
  • +
  • The ZFS pool is called zbackup
  • +
+

Redbrick Special Notes

+

On each machine, /storage is where NFS is mounted; /home and /webtree are symlinks into it.

+

There are 2 scripts used to control quotas, detailed below.

+

NFS is backed up to Albus via ZnapZend.

+

zfsquota And zfsquotaquery

+

These are two bash scripts that run as systemd services on Icarus to manage quotas. This is achieved through getting and setting the userquota and userused properties of the ZFS dataset.

+

Zfsquota

+

ZFSQuota will read the quota field from LDAP and sync it with the userquota value on the dataset. It is not event driven - it runs on a timer every 3 hours and syncs all LDAP quotas with ZFS. It can be kicked off manually, which is described below. Users with no quota in LDAP will have no quota in /storage, and users whose quota is removed from LDAP will keep their existing quota on ZFS.

+

Changing user names has no impact on this since it is synced with uidNumber.

+

Zfsquotaquery

+

ZFSQuotaQuery returns the quota and used space of a particular user. This is used to then inform rbquota which provides the data for the MOTD used space report. Both of these scripts are defined and deployed in the Nix config repo. It runs on port 1995/tcp.

+

Operation

+

In general, there isn't too much to do with NFS. Below are some commands of interest for checking its status.

+
Bash
# On the NFS server, list the exported filesystems
+showmount -e
+
+# Get the real space usage + fragmentation percent from ZFS
+zpool list zbackup
+
+# Check a user's quota
+zfs get userquota@m1cr0man zbackup
+zfs get userused@m1cr0man zbackup
+
+# Delete a quota from ZFS (useful if a user is deleted)
+zfs set userquota@123456=none zbackup
+
+# Get all user quota usage, and sort it by usage
+zfs userspace -o used,name zbackup | sort -h | tee used_space.txt
+
+# Resync quotas (this command will not return until it is finished)
+systemctl start zfsquota
+
+# Check the status of zfsquotaquery
+systemctl status zfsquotaquery
+
+

Troubleshooting

+

In the event where clients are unable to read from NFS, your priority should be restoring the NFS server, rather than

+

unmounting NFS from clients. This is because NFS is mounted in hard mode everywhere, meaning that it will block on IO until a request can be fulfilled.

+

Check The Server

+
Bash
# Check the ZFS volume is readable and writable
+ls -l /zbackup/home
+touch /zbackup/testfile
+
+# Check that rpc.mountd, rpc.statd and rpcbind are running and listening
+ss -anlp | grep rpc
+
+# Check the above services for errors (don't worry about blkmap)
+systemctl status nfs-{server,idmapd,mountd}
+journalctl -fu nfs-server -u nfs-idmapd -u nfs-mountd
+
+

Check The Client

+
Bash
# Check for connection to NFS
+ss -atp | grep nfs
+
+# Check the fstab entry
+grep storage /etc/fstab
+
+# Check if the NFS server port can be reached
+telnet 192.168.0.150 2049
+# Entering gibberish should cause the connection to close
+
+# Remount read-only
+mount -o remount,ro /storage
+
+# Not much left you can do but remount entirely or reboot
+
+

Rolling Back or Restoring a Backup

+

See znapzend

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/nomad/index.html b/services/nomad/index.html new file mode 100644 index 00000000..54c1ed03 --- /dev/null +++ b/services/nomad/index.html @@ -0,0 +1,2352 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Nomad - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Nomad - distro, wizzdom

+
+

Adapted from redbrick/nomad README

+
+

What is Nomad?

+

Good question!

+
+

Nomad is a simple and flexible scheduler and orchestrator to deploy and manage
+containers and non-containerized applications
+- Nomad Docs

+
+

Deploying a Nomad Job

+

All Nomad job related configurations are stored in the nomad directory.

+

The terminology used here is explained here. This is required reading.

+
    +
  • Install Nomad on your machine here
  • +
  • Clone this repo
  • +
+
Bash
git clone git@github.com:redbrick/nomad.git
+
+
    +
  • Connect to the admin VPN
  • +
  • Set the NOMAD_ADDR environment variable:
  • +
+
Bash
export NOMAD_ADDR=http://<IP-ADDRESS-OF-HOST>:4646
+
+
    +
  • Check you can connect to the nomad cluster:
  • +
+
Bash
nomad status
+
+
    +
  • You should receive a list back of all jobs, now you are ready to start deploying!
  • +
+
Bash
nomad job plan path/to/job/file.hcl
+
+

This will plan the allocations and ensure that what is deployed is the correct version.

+

If you are happy with the deployment, run

+
Bash
nomad job run -check-index [id from last command] path/to/job/file.hcl
+
+

This will deploy the planned allocations, and will error if the file changed on disk between the plan and the run.

+

You can shorten this command to just

+
Bash
nomad job plan path/to/file.hcl | grep path/to/file.hcl | bash
+
+

This will plan and run the job file without the need for you to copy and paste the check index id. Only use this once you are comfortable with how Nomad places allocations.

+

Restart a Nomad Job

+
    +
  • First, stop and purge the currently-running job
  • +
+
Bash
nomad job stop -purge name-of-running-job
+
+
    +
  • Run a garbage collection of jobs, evaluations, allocations, nodes and reconcile summaries of all registered jobs.
  • +
+
Bash
nomad system gc
+
+nomad system reconcile summaries
+
+nomad system gc # (yes, again)
+
+
    +
  • Plan and run the job
  • +
+
Bash
nomad job plan path/to/job/file.hcl
+
+nomad job run -check-index [id from last command] path/to/job/file.hcl
+
+

Exec into Container

+

At times it is necessary to exec into a docker container to complete maintenance, perform tests or change configurations. The syntax to do this on nomad is similar to docker exec with some small additions:

+
Bash
nomad alloc exec -i -t -task <task-name> <nomad-alloc-id> <command>
+
+

Where:

+
    +
  • <task-name> is the name of the task you want to exec into (only needed when there is more than one task in job)
  • +
  • <nomad-alloc-id> is the id for the currently running allocation, obtained from the web UI, nomad CLI, or nomad API
  • +
  • <command> is the command you want to run. e.g. sh, rcon-cli
  • +
+
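For example, a rough sketch of exec-ing into the HedgeDoc database task mentioned elsewhere in these docs (the allocation ID comes from the job status output):

Bash
# Find a running allocation for the job
nomad job status hedgedoc

# Open a shell in the hedgedoc-db task of that allocation
nomad alloc exec -i -t -task hedgedoc-db <nomad-alloc-id> sh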

Cluster Configuration

+

nomad/cluster-config contains configuration relating to the cluster, including:

+ +

Node Pools

+

Node pools are a way to group nodes into logical groups that jobs can target, which can be used to enforce where allocations are placed.

+

e.g. ingress-pool.hcl is a node pool that is used for ingress nodes such as the bastion-vm. Any jobs that are defined to use node_pool = "ingress" such as traefik.hcl and gate-proxy.hcl will only be assigned to one of the nodes in the ingress node pool (i.e. the bastion VM)

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/paste/index.html b/services/paste/index.html new file mode 100644 index 00000000..779cd893 --- /dev/null +++ b/services/paste/index.html @@ -0,0 +1,2251 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Pastebin - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Pastebin - wizzdom

+

Redbrick currently uses Privatebin as a paste utility accessible at paste.redbrick.dcu.ie and paste.rb.dcu.ie

+

Privatebin

+

The Privatebin instance is deployed with nomad on aperture. Its configuration is available here. Privatebin doesn't support full configuration via environment variables but instead uses a conf.php file. This is passed in using nomad templates.

+

All sensitive variables are stored in the consul KV store.

+

The main points are as follows:

+
    +
  • configure URL shortener (shlink)
  • +
+
conf.php
urlshortener = "https://s.rb.dcu.ie/rest/v1/short-urls/shorten?apiKey={{ key "privatebin/shlink/api" }}&format=txt&longUrl="
+
+
    +
  • enable file upload, set file size limit and enable compression
  • +
+
conf.php
fileupload = true
+sizelimit = 10485760
+compression = "zlib"
+
+
    +
  • Connect to PostgreSQL database
  • +
+
conf.php
[model]
+class = Database
+[model_options]
+dsn = "pgsql:host=postgres.service.consul;dbname={{ key "privatebin/db/name" }}"
+tbl = "privatebin_"     ; table prefix
+usr = "{{ key "privatebin/db/user" }}"
+pwd = "{{ key "privatebin/db/password" }}"
+opt[12] = true    ; PDO::ATTR_PERSISTENT ; use persistent connections - default
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/servers/index.html b/services/servers/index.html new file mode 100644 index 00000000..0e321014 --- /dev/null +++ b/services/servers/index.html @@ -0,0 +1,2417 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Servers - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Servers

+

Redbrick provides two main servers (Azazel and Pygmalion) for its members to use for various use cases, for example running applications or user programs.

+

Entrypoints

+

The main login server used in Redbrick is Azazel. You may also log in to Pygmalion if you wish at pyg.redbrick.dcu.ie

+

2 Factor Authentication is required to log in to Redbrick servers. This is done via an SSH key and your Redbrick username/password combination. For more information on how to create an SSH key, and configure your account for 2FA, please read below.

+

Logging in

+

You've set up 2FA on your account with an SSH key, right? If not, you really have to, I'm sorry.

+

You can log in using SSH in your command prompt or terminal application of choice with your Redbrick username and password like so:

+
Bash
ssh YOUR_USERNAME@redbrick.dcu.ie -i SSH_KEY_LOCATION_PATH
+
+# When prompted for the password, please input your Redbrick account password.
+# NOTE: The "-i" flag specifies the location of your private ssh key.
+
+
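Optionally, you can save yourself some typing with an entry in your SSH client config; a sketch (the Host alias is arbitrary, and the key path assumes the default location from the key-generation steps below):

Bash
cat >> ~/.ssh/config << 'EOF'
Host redbrick
    HostName redbrick.dcu.ie
    User YOUR_USERNAME
    IdentityFile ~/.ssh/id_ed25519
EOF

# Then simply:
ssh redbrick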

Alternatives

+

If you are an unbothered king/queen that simply does not mind using a web interface, let me introduce you to wetty.redbrick.dcu.ie. You do not need an SSH key here.

+

Logging in to other Servers

+

Your home directory is synced (i.e the same) on all public Redbrick servers. Thus the authorized_keys file will be the same on Azazel as it is on Pygmalion, meaning you can log in to pyg.redbrick.dcu.ie too, and so on.

+

Setting up an SSH Key

+

Generating an SSH key pair creates two long strings of characters: a public and a private key. You can place the public key on any server, and then connect to the server using an SSH client that has access to the private key.

+

When these keys match up, and your account password is also correct, you are granted authorisation to log in.

+

1. Creating the Key Pair

+

On your local computer, in the command line of your choice, enter the following command:

+
Bash
ssh-keygen -t ed25519
+
+

Expected Output

+
Text Only
Generating public/private ed25519 key pair.
+
+

2. Providing Some Extra Details

+

You will now be prompted with some information and input prompts:

+
    +
  • The first prompt will ask where to save the keys.
  • +
+
Text Only
Enter file in which to save the key (e.g /home/bob/.ssh/id_ed25519):
+
+

You can simply press ENTER here to save them at the default location (.ssh directory in your home directory). Alternatively you can specify a custom location if you wish.

+
    +
  • The second prompt will ask for a new passphrase to protect the key.
  • +
+
Text Only
Enter passphrase (empty for no passphrase):
+
+

Here you may protect this key file with a passphrase. This is optional and recommended for security.

+
+

Note

+

If you do not wish to add a passphrase to save you all that typing, simply press ENTER for the password and confirmation password prompts.

+
+

The newly generated public key should now be saved in /home/bob/.ssh/id_ed25519.pub. The private key is in the same directory, at /home/bob/.ssh/id_ed25519 (i.e. under the .ssh folder in your user home directory).

+
NOTE FOR WINDOWS (you heathen)
+

This key is saved under .ssh under your User directory. (i.e C:\Users\Bob\.ssh\id_ed25519)

+

3. Copying the Public Key to the Server

+

In this step we store our public key on the server we intend to log in to. This key will be used against our secret private key to authenticate our login.

+

For the purposes of this tutorial we will be using Pygmalion (pyg.redbrick.dcu.ie) as our server.

+

Logging in to Wetty

+

In order to access the server to actually place our keys in it, we need to log in via Wetty - a shell interface for Pygmalion on the web.

+ +

You should see this prompt:

+
Text Only
pygmalion.redbrick.dcu.ie login:
+
+

Enter your Redbrick username and press ENTER. When prompted, enter your Redbrick password. Forgot either of these?

+

Adding the Key into the authorized_keys File

+
    +
  • Add the key
  • +
+

Grab the contents of your public key. You may use the cat filepath command for this:

+
Bash
cat /home/bob/.ssh/id_ed25519.pub
+
+

On Wetty, enter the following command in the shell, with YOUR_KEY replaced with your public ssh key.

+
Bash
echo "YOUR_KEY" >> ~/.ssh/authorized_keys
+
+

This command will append your public key to the end of the authorized_keys file.

+
+

Note!

+

The speech marks surrounding YOUR_KEY are important!

+
+
PSSST… Made a mistake?
+
Text Only
*You can manually edit the authorized_keys file in a text editor with the following command to fix any issues:*
+
+
Bash
nano ~/.ssh/authorized_keys
+
+

Congratulations! If you've made it this far, you're ready to log in now.

+

Forgot Your Password?

+

Contact an admin on our Discord Server or at elected-admins@redbrick.dcu.ie

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/socs/index.html b/services/socs/index.html new file mode 100644 index 00000000..e5e8ca38 --- /dev/null +++ b/services/socs/index.html @@ -0,0 +1,2165 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Socs using Redbrick Infrastructure - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Socs Using Redbrick Infrastructure

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/traefik/index.html b/services/traefik/index.html new file mode 100644 index 00000000..c255ff26 --- /dev/null +++ b/services/traefik/index.html @@ -0,0 +1,2158 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + Traefik - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

Traefik

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/user-vms/index.html b/services/user-vms/index.html new file mode 100644 index 00000000..6a2746de --- /dev/null +++ b/services/user-vms/index.html @@ -0,0 +1,2318 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + User VMs - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

User VMs

+

User VMs are deployed on aperture using nomad's QEMU driver.

+

Each VM is configured with cloud-init. Those configuration files are served by wheatley, but they can be served by any HTTP server.

+

Setting up Networking on the Host

+

The host needs to be configured to allow the VMs to communicate with each other. This is done by creating a bridge and adding the VMs to it.

+

Create a Bridge

+

To create a bridge that qemu can use to place the guest (VM) onto the same network as the host, follow the instructions listed here for iproute2, summarised below.

+

We need to create a bridge interface on the host.

+
Bash
sudo ip link add name br0 type bridge
+sudo ip link set dev br0 up
+
+

We'll be adding a physical interface to this bridge to allow it to communicate with the external (UDM) network.

+
Bash
sudo ip link set eno2 master br0
+
+

You'll need to assign an IP address to the bridge interface. This will be used as the default address for the host. You can do this with DHCP or by assigning a static IP address. The best way to do this is to create a DHCP static lease on the UDM for the bridge interface MAC address.

+
+

Note

+

TODO: Find out why connectivity seems to be lost when the bridge interface receives an address before the physical interface.
+If connectivity is lost, release the addresses from both the bridge and the physical interface (in that order) with sudo dhclient -v -r <iface> and then run sudo dhclient -v <iface> to assign the bridge interface an address.

+
+

Add the VMs to the Bridge

+

The configuration of the qemu network options in the job file will create a new tap interface and add it to the bridge and the VM. I advise you, for your own sanity, to never touch the network options; they will only cause you pain.

+

For others looking, this configuration is specific to QEMU only.

+
Bash
qemu-system-x86_64 ... -netdev bridge,id=hn0 -device virtio-net-pci,netdev=hn0,id=nic1
+
+

This will assign the VM an address on the external network. The VM will be able to communicate with the host and other VMs in the network.

+

You must also add allow br0 to /etc/qemu/bridge.conf to allow qemu to add the tap interfaces to the bridge. Source

+

The VMs, once connected to the bridge, will be assigned an address via DHCP. You can assign a static IP address to the VMs by adding a DHCP static lease on the UDM for the VMs MAC address. You can get the address of a VM by checking the nomad alloc logs for that VM and searching for ens3.

+
Bash
nomad job status distro-vm | grep "Node ID" -A 1 | tail -n 1 | cut -d " " -f 1
+# <alloc-id>
+nomad alloc logs <alloc-id> | grep -E "ens3.*global" | cut -d "|" -f 4 | xargs
+# cloud init... ens3: <ip-address> global
+
+

Configuring the VMs

+

The VMs are configured with cloud-init. Their docs are pretty good, so I won't repeat them here. The files can be served by any HTTP server, and the address is placed into the job file in the QEMU options.

+
Nomad
...
+        args = [
+          ...
+          "virtio-net-pci,netdev=hn0,id=nic1,mac=52:54:84:ba:49:22", # make sure this MAC address is unique!!
+          "-smbios",
+          "type=1,serial=ds=nocloud-net;s=http://136.206.16.5:8000/",
+        ]
+...
+
+

Here in the args block:

+
    +
  • we define that the VM will have a network device using the virtio driver, we pass it an id and a random unique MAC address
  • +
  • we tell it to use smbios type 1 and to grab its cloud-init configs from http://136.206.16.5:8000/
  • +
+
+

Note

+

If you're running multiple VMs on the same network make sure to set different MAC addresses for each VM, otherwise you'll have a bad time.

+
+

Creating a New VM

+

To create a new VM, you'll need to create a new job file and a cloud-init configuration file. Copy any of the existing job files and modify them to suit your needs. The cloud-init configuration files can be copied and changed based on the user also. Remember to ensure the MAC addresses are unique!
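If it helps, here is a small sketch for generating a random MAC in the same 52:54:xx range used by the example above (purely illustrative; any scheme works as long as each VM's MAC is unique):

Bash
# Print a random MAC address with the 52:54 prefix
printf '52:54:%02x:%02x:%02x:%02x\n' $((RANDOM % 256)) $((RANDOM % 256)) $((RANDOM % 256)) $((RANDOM % 256))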

+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/wetty/index.html b/services/wetty/index.html new file mode 100644 index 00000000..29e1d381 --- /dev/null +++ b/services/wetty/index.html @@ -0,0 +1,2199 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Wetty - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + + + + +

Wetty - wizzdom

+

Redbrick uses Wetty as our web terminal of choice. It is accessible at wetty.redbrick.dcu.ie, wetty.rb.dcu.ie, term.redbrick.dcu.ie, anyterm.redbrick.dcu.ie and ajaxterm.redbrick.dcu.ie.

+

Why all the different domains? - For legacy reasons!

+

The configuration is located here

+

The configuration for Wetty is pretty straightforward:

+
    +
  • SSHHOST - the host that Wetty will connect to (one of the Login boxes), defined in consul
  • +
  • SSHPORT - the port used for ssh
  • +
  • BASE - the base path for Wetty (default is /wetty)
      +
    • This isn't very well documented but trust the process. It works!!
    • +
    +
  • +
+
Nomad
SSHHOST={{ key "wetty/ssh/host" }}
+SSHPORT=22
+BASE=/
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + + + \ No newline at end of file diff --git a/services/znapzend/index.html b/services/znapzend/index.html new file mode 100644 index 00000000..cf9698bc --- /dev/null +++ b/services/znapzend/index.html @@ -0,0 +1,2271 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + ZnapZend - Redbrick Docs + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + + + + +
+
+ + + +
+
+
+ + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

ZnapZend

+

Overview

+

ZnapZend is used to back up the NFS ZFS dataset from our NFS server to Albus.

+

It can also be used to back up other ZFS datasets on other hosts, but at the time of writing NFS is the only thing being backed up this way.

+

ZnapZend runs on the client and sends backups to Albus over SSH using zfs send | zfs receive piping.

+

The backup strategy can be viewed in the NixOS configuration.

+

Adding Another Backup

+

There is not much manual configuration to add a host to the ZnapZend backups.

+
    +
  1. Create an SSH key for the root user with no passphrase on the host you want to send the backups from. Use
    +ssh-keygen -t ed25519.
  2. +
  2. Add this new SSH public key to the rbbackup user's authorized keys on Albus.
  4. +
  3. Try SSHing to rbbackups@albus.internal to load the host key and test the passwordless authentication.
  6. +
  4. Import the znapzend service config
    on the sending host and configure redbrick.znapzendSourceDataset and redbrick.znapzendDestDataset. Then apply the config.
  8. +
+
+

Note

+

The DestDataset must be unique across all configured backups/servers.

+
+

Debugging

+

Znapzend runs at the top of every hour to make backups. You can watch the progress with journalctl -fu znapzend.service.


Failures are usually caused by incorrect SSH configuration, so make sure that passwordless auth using the sending host's root SSH key is working.
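
A quick check from the sending host (BatchMode makes ssh fail immediately instead of prompting if key auth is broken):

ssh -o BatchMode=yes rbbackups@albus.internal true && echo "passwordless auth OK"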


Rolling Back NFS


If the NFS server is online and functional, you do not need to involve Albus to roll back changes, as all the snapshots are kept on Icarus too.

  1. Find the snapshot you want to restore with zfs list -t snapshot.
  2. Run zfs rollback $snapshotname. (A worked example follows this list.)
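
A worked example with hypothetical names (note that zfs rollback only targets the most recent snapshot by default; pass -r to roll back further, which destroys any newer snapshots):

zfs list -t snapshot | grep nfs            # find the snapshot
zfs rollback tank/nfs@2024-09-30-020000    # hypothetical snapshot name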

That's it! These instructions obviously work for backups other than NFS too, should any ever exist.


Restoring NFS from a Backup


If the NFS server has died or you are creating a copy of it, here's how to pull the dataset from Albus:

  1. On Albus, find the snapshot you want to restore with zfs list -t snapshot.
  2. Open a screen/tmux, and copy the snapshot to a dataset in your target ZFS pool with:

ssh albus zfs send -vRLec $snapshotname | zfs receive $newpool/$datasetname
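
For example, with placeholder dataset and snapshot names (adjust to whatever zfs list shows on Albus and to your target pool):

ssh albus zfs send -vRLec backup/nfs@2024-09-30-000000 | zfs receive newpool/nfs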


Tags

  • aperture
  • api
  • azazel
  • bind
  • chell
  • daedalus
  • debian
  • details
  • dns
  • docker
  • exposed
  • getting-started
  • glados
  • gpg
  • hardcase
  • hardware
  • icarus
  • images
  • ingress
  • install
  • johnson
  • ldap
  • libvirt
  • login-box
  • motherlode
  • nixos
  • nomad
  • open-gov
  • paphos
  • powercut
  • pygmalion
  • qemu
  • services
  • tagging
  • todo
  • ubuntu
  • vm
  • wheatley
  • zeus