diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/00_basics/00_basics.md b/00_basics/00_basics.md new file mode 100644 index 000000000..23425b106 --- /dev/null +++ b/00_basics/00_basics.md @@ -0,0 +1,661 @@ +# The Unix Shell, Git, Github and LLMs: an Introduction + +### Requirements + +Command Line Interfaces (CLI) are found throughout all Operating Systems, however we recommend the use of the Unix CLI. If you have a Unix based machine such as Linux/Ubuntu (or other Linux distributions), macOS, you are ready for the next step. If you use a Windows machine, please install the [Windows Subsistem for Linux (WSL)](https://docs.microsoft.com/en-us/windows/wsl/install) as seen in the [Before FOSS Starts](installation.md#Software) section. + +--- + +## The Unix Shell + +The computer is a tool. It evolved over the years from being an intricated calculator into an interactive machine with thousands of moving parts that keep us all connected through the Internet. It is now the norm to use a mouse, keyboard, and seeing flashing images on our screens through the Graphical User Interface (GUI). GUIs are central to the way we interact with computers, however, to best take advantage of the computer's many systems, one needs to learn of the **Command Line Interface (CLI)**. The CLI sees the computer stripped down to only a [Terminal](https://en.wikipedia.org/wiki/Terminal_emulator) from where one can run powerful commands executed through the [Shell](https://en.wikipedia.org/wiki/Shell_(computing)). + +Whilst the GUI allows for *better accessbility* to a computer, the CLI allows for *advanced usage* of one's computer. + +### CLI vs Terminal vs Shell + +- [CLI (Command Line Interface)](https://en.wikipedia.org/wiki/Command-line_interface): an interface that receives commands (and gives output) from a user in the form of lines of text. +- [Terminal](https://en.wikipedia.org/wiki/Terminal_emulator): **the** text based interface window. 
+- [Shell](https://en.wikipedia.org/wiki/Shell_(computing)): a computer program and scripting language that presents a CLI which allows you to control your computer using commands. + +The **Shell** sends commands to the computer through the **CLI** accessible through a **Terminal** window + +### Things-to-Know About Commands + +- Shell commands are used to **navigate**, **visualize**, **modify** (files/folders) and **automate** (processes), and can only be executed through the shell's terminal window. +- For every command, typing `man` (manual) before the command, will open the manual for said command. +``` +$ man ls +``` + - Doing the above command will result in opening the *manual* for the `ls` command. You can exist the man page by pressing `q`. +- Each command has **flags**, or options, which are summoned with a `-`, such as ` -`. +``` +$ ls -a -l -h +``` + - Doing the above command calls for the `-a` (all), `-l` (long), `-h` (human readable) flags. This causes `ls` to output a list of *all* files (inculding hidden files/folders) with human readable file size (e.g., it will list 3MB instead of 3000000), permissions, creator, and date of creation. + - If you do not know what flags are available, you can refer to the `man` command (or for many tools, use the `-h` (help) flag). +- `.` refers to *current* directory; `..` refers to *above* directory; `/` is the directory separator; `~` indicates the home directory. +``` +$ ls . # lists files and folders in the current directory +$ ls .. # lists files and folders in the above directory +$ ls ~ # lists files and folders in the home directory +$ ls ~/Documents # lists files and folders in Documents (a folder present in the home directory) +``` + +### Introductory Shell Commands + +The following are introductory commands necessary when interacting with a computer through the Shell. These will help you orient, create and delete files. 
Most of this material is explained in more details in the [Carpentries' Shell Module](https://swcarpentry.github.io/shell-novice/). Visit the Carpentries' website for a more in-depth tutorial. + +!!! info "A short tutorial introducing the Shell" + Here below are quick explanations of a few elementary commands that will help you orient and navigate your files and folders through the Shell. If you would like to follow along the explanations for each command, feel free to download and unzip the [shell-lesson-data.zip](https://swcarpentry.github.io/shell-novice/data/shell-lesson-data.zip) file from the [Shell's Carpentry module](https://swcarpentry.github.io/shell-novice/setup.html). + + ??? question "Don't have access to a GUI?" + Following along on a machine with no access to a GUI? Execute the following commands: + ``` + $ sudo apt install unzip + $ wget https://swcarpentry.github.io/shell-novice/data/shell-lesson-data.zip + $ unzip shell-lesson-data.zip + ``` + +#### Navigation + +| Command | Explanation | +|---|---| +|`pwd`| print working directory | +|`ls`| list content of folder | +|`cd`| change directory | + +By typing `pwd`, the current working directory is printed. + +``` +$ pwd +/mnt/d/ +``` + +We can then use `ls` to see the contents of the current directory. By using the `-F` flag (`ls -F`) we can also see the type of file. **Note:** an asterisk (`*`) at the end of the object will denote a file, whilst a slash (`/`) will denote a folder. + +``` +$ ls -F +shell-lesson-data/ shell-lesson-data.zip* +``` + +We can then move inside the folder of our choice doing `cd`. Doing `ls` following the opening of the folder of choice, will show the contents of the folder you just moved in. Feel free to explore the contents of the folders by using `cd` and `ls`. + +``` +$ cd shell-lesson-data +$ ls -F + +exercise-data/ north-pacific-gyre/ + +$ ls -F exercise-data/ + +animal-counts/ creatures/ numbers.txt* proteins/ writing/ +``` + +!!! 
Tip "Use the Tab key to autocomplete" + You do not need to type the entire name of a folder or file. By using the tab key, the Shell will autocomplete the name of the files or folders. For example, typing the following + + ``` + $ ls -F exer + ``` + + and pressing the tab key, will result in autocompletion. + + ``` + $ ls -F exercise-data/ + ``` + + You can then press tab twice, to print a list of the contents of the folder. + + ``` + $ ls -F exercise-data/ + animal-counts/ creatures/ numbers.txt proteins/ writing/ + ``` + +#### Working with Files and Directories + +| Command | Explanation | +|---|---| +|`mkdir`| make a directory | +|`touch`| creat empty file | +|`nano` or `vim`| text editors | +|`mv`| move command | +|`cp`| copy command | +|`rm`| remove command | + + +Return to `shell-lesson-data`, and crate a directory with `mkdir `. + +``` +$ mkdir my_folder +$ ls -F +exercise-data/ my_folder/ north-pacific-gyre/ +``` + +Notice the new `my_folder` directory. + +!!! danger "Naming your files" + It is strongly suggested that you avoid using spaces when naming your files. When using the Shell to communicate with your machine, a space can cause errors when loading or transferring files. Instead, use dashes (`-`), underscores (`_`), periods (`.`) and CamelCase when naming your files. + + Acceptable naming: + ``` + $ mkdir my_personal_folder + $ mkdir my_personal-folder + $ mkdir MyPersonal.Folder + ``` + ??? Question + What do you think will happen if you attempt creating a folder by typing spaces? + + ??? Success "Solution" + You will obtain as many folders as typed words! + ``` + $ mkdir my folder + $ ls -F + exercise-data/ folder/ my/ north-pacific-gyre/ + ``` + Notice the two folders `my` and `folder`. + +Create an empty file with `touch `. + +``` +$ touch new_file +``` + +`touch` will create an **empty** file, it is up to you to populate using whichever text editor you prefer. 
Refer to the carpentries material to know more about nano and its functionalities ([link](https://swcarpentry.github.io/shell-novice/03-create/index.html#create-a-text-file)). + +!!! tip + You can also use your text editor to look at the contents of your files! + +Use `mv ` to move your newly created file to the directory you created previously (you can then use `ls` to check if you successully moved the file). + +``` +$ ls -F +exercise-data/ new_file* my_folder/ north-pacific-gyre/ + +$ mv new_file my_folder/ +$ ls -F +exercise-data/ my_folder/ north-pacific-gyre/ + +$ ls -F my_folder/ +new_file* +``` +`mv` can also be used to **rename** a file or folder with `mv `. + +``` +$ cd my_folder/ +$ mv new_file my_file +$ ls -F +my_file* +``` + +`cp` is the command to copy a file with the syntax `cp ` + +``` +$ cp my_file copy_my_file +$ ls -F +copy_my_file* my_file* +``` + +!!! note "Copying folders" + To copy folders and the content of these folders, you will have to use the `-r` flag (recursive) for `cp` in the following manner `cp -r ` (following example is from the `shell-lesson-data/` directory). + ``` + $ cp -r my_folder/ copy_my_folder + $ ls -F + copy_my_folder/ exercise-data/ my_folder/ north-pacific-gyre/ + + $ ls -F my_folder/ + copy_my_file* my_file* + + $ ls -F copy_my_folder/ + copy_my_file* my_file* + ``` + +To remove an unwanted file, use `rm `. + +``` +$ rm copy_my_file +$ ls -F +my_file +``` + +!!! note "Removing folders" + Save as the "Copying Folders" note, you have to use the `-r` flag to remove a folder `rm -r ` (following example is from the `shell-lesson-data/` directory). + ``` + $ rm -r copy_my_folder/ + $ ls -F + exercise-data/ my_folder/ north-pacific-gyre/ + ``` + +#### Introductory Remarks + +The commands listed here above are to help you better understand directories and files. There is a lot more that one can accomplish when communicating with you computer through the Shell. 
In case you want to know more, here are some useful links you can visit: + +- [Pipes and Filters](https://swcarpentry.github.io/shell-novice/04-pipefilter/index.html) +- [Loops](https://swcarpentry.github.io/shell-novice/05-loop/index.html) +- [Scripts](https://swcarpentry.github.io/shell-novice/06-script/index.html) +- [Finding Things](https://swcarpentry.github.io/shell-novice/07-find/index.html) + +--- + +## Git and Github + +The concept of **version control** will be touched on in more depth [later](05_version_control.md) on in FOSS, however it is important to know the basics of Git and GitHub. + +- **Git**: + - First developed in 2005, git is a version control software that allows users to make changes and add versions to their code. + - Changes and versions are saved locally. + - Accessible through the Shell. + +- **GitHub**: + - First launched in 2008, its main focus is hosting and sharing code. + - Changes and versions are saved online (requires an account). + - Mainly administered through the web (it also has a desktop app). + - Code can be **cloned** to your computer, changes can be **pulled**, **committed** and **pushed**. + +!!! Info "The **O** in FOSS" + FOSS stands for Foundational **Open** Science Skills: how many times have you worked on your code just to hit a bottleneck and found a solution on Stack Overflow? How many times have you found links that bring you to a GitHub repository with the exact snippet of code you needed? + + The beauty of the **O** is that it makes Science and its code available for all through the internet, sharing ideas and solutions for all. + + !!! Warning "Licences" + Beware of what code you use and replicate, as a complete GitHub repository comes with a **licence**. Different licences allow the code to be accessed and shared in different methods, therefore always exercise care when looking at other people's code. 
+ +### Introducing GitHub + +Since we are talking about making science accessible, we invite you to use GitHub to save and share your code. Please start by creating a GitHub account at https://github.com/. + +#### Repositories + +Repositories are where you code is stored. A suggestion is to have *one* repository for *one* project. + +You can create repositories by clicking on the **Repositories** tab, and then clicking **New**. + +![git_1](assets/git_1.png) + +Here, you can choose the name of your own repository, choose to make it private or public, adding a README and a licence. It is **strongly** reccomended that you choose to add an empty README file. + +![git_2](assets/git_2.png) + +!!! Info "So, why a README?" + There are two main reasons why you would like a README file: + + 1. It adds structure to your repository *automatically* - otherwise you would need to create said structure by yourself (not recommended for beginners). + 2. It is the "default" file that GitHub reads upon opening the repository. It can be treated as the go-to file that explains what the repository is for, what each file does, how to cite your reasearch, amongst other things. + +!!! Info "Adding a Licence" + As discussed previously, the addition of a licence can heavily contribute to the shareability of your code. Make sure that whichever licence you choose is in line with your principals as well as your project's. GitHub comes with a list of licences which you can review. It is also common to choose a licence later on! + +Ultimately, your new repository should look like the following screenshot. Notice the **LICENCE** document and the **README.md** + +![git_3](assets/git_3.png) + +!!! Info "The Markdown Extension (.md)" + Markdown is a lightweight markup language for creating formatted text using a plain-text editor well widespread throughout text files on the web. It uses symbols (*~-#`) for syntaxing text, and it is what GitHub (and this website!) use to format text. 
You can read more on Markdown on the [Markdown Guide](https://www.markdownguide.org/). + +#### Adding and Modifying Code + +GitHub allows you to add and modify code in two ways: through the online portal (the webpage you're seeing) and on your computer. + +##### **Adding Code through the GitHub web page** + +Adding code to your repository through the web page is suggested if what you want to add is simple (Like a README file!). + +- Click the **Add File** button, which will allow you to either create a new file, or upload files from your computer. Select **Create New File**. +- The editing page will open: choose a name and an extension on the top of the page. +- On the editing page you can modify code as you see necessary (writing, pasting) +- ![git_05](assets/git_5.png) + - you can also see your changes (if formatted) with the preview function (with the **Preview** button). +- To "Save" your changes, you will need to **commit** your changes: + - navigate at the bottom of the page, specify your commit with a name and add a description if necessary. +- ![git_06](assets/git_6.png) +- You will be able to see your newly created file on your repository home after committing your changes. + +!!! info "Committing changes" + **Committing** is the term used for *saving* changes you've made to your code. Each **commit** can be accessed within the GitHub web interface, which will show you the code prior and after the changes you've made. To see a list of all commits you made, click on the :fontawesome-solid-clock-rotate-left: icon under the **Code** button. + + - You can see from the picture below the lines that have been removed (in red), and the lines that have been added (in green). + ![git_07](assets/git_7.png) + + - Additionally, you can also see the full list of commits made to the file or repository. 
+ ![git_08](assets/git_8.png) + +##### **Adding Code locally** + +Adding code locally is a more complex than adding code through the web page, but it allows for better control on what files you commit. + +- To add or modify code locally, you need to **clone** the repository on your computer. This requries that you have `git` installed on your machine; If you do not have `git` installed, use the following commands: +``` +$ sudo apt-get install -y git-all +``` +- You can then clone the repository by clicking on the **Code** button, and copying the link shown +- ![git_04](assets/git_4.png) +- On your machine, open a terminal window and type the following command: +``` +$ git clone # Replace with the link you copied such as below + +$ git clone https://github.com/CosiMichele/3_git_tutorial.git +Cloning into 'foss23_git_tutorial'... +remote: Enumerating objects: 13, done. +remote: Counting objects: 100% (13/13), done. +remote: Compressing objects: 100% (12/12), done. +remote: Total 13 (delta 5), reused 0 (delta 0), pack-reused 0 +Unpacking objects: 100% (13/13), 14.47 KiB | 90.00 KiB/s, done. +``` +- Your code is now available to you on your machine, and you can add and modify files as needed. + +You have modified your code locally, however you still have to push it to the repository. Prior to doing so there are a couple of steps you should do: + +- `git status`: it checkes on the status of the repository (files that have been modified, deleted, added - from either local or in the online repository) +- `git pull`: it checks and "pulls" changes from the online repository to your local repository. It ensures that you are always updated on the repository files *and* it can save a lot of time in case there are clashing commits from different users. + +To do so: + +- **Add** all fiels you have modified and want to commit: +``` +$ git add . # Recall that "." (period) stands for all files in a folder +``` +- **Commit** the changes. 
When committing changes, you have to add a message (in quotation marks) with the `-m` flag. This message is a concise and descriptive few words about what you did: +``` +$ git commit -m "locally added and modified files" +[main 05f0ef6] locally added and modified files + 2 files changed, 11 insertions(+), 1 deletion(-) + create mode 100644 file_from_local.md +``` +- push your changes with **push**: +``` +$ git push +Enumerating objects: 6, done. +Counting objects: 100% (6/6), done. +Delta compression using up to 12 threads +Compressing objects: 100% (4/4), done. +Writing objects: 100% (4/4), 585 bytes | 32.00 KiB/s, done. +Total 4 (delta 0), reused 0 (delta 0) +To https://github.com/CosiMichele/foss22_git_tutorial.git + b649de3..05f0ef6 main -> main +``` + +!!! Warning "First time Pushing a commit?" + GitHub is not going to blindly allow you to push changes to the repo, but it will be asking for you to log in. + + - When asked for the user name: + - Add the username that you use to login into GitHub + - When it asks you for the password: + - **DO NOT PUT YOUR PASSWORD**, you will require a **token** instead + - Generate the token by + - On GitHub, click on your avatar (top right, and navigate to **Settings**) + - Scroll down to the bottom of the left hand menu, select **Developer settings**, and then **Personal access tokens** + - Now click on **Generate new token** (Enter password if requested) + - Choose the lenght of time for which this token is valid for, a note (for example, a reminder of what computer you're using this token on), and all the functionalities attached to it (as this is your private repository, you can select all the functionalities). Scroll to the bottom of the page and click **Generate token** + - Once created, the token is going to appear: **copy the token and paste it in the password field in your terminal instead of your password**. + +You can now see the changes you made locally on the GitHub repository page. 
+ +![git_09](assets/git_9.png) + +#### Branching + +Branching allows you to develop your code whilst in a contained environment separate from your **main** environment. You can view the list and number of branches on the top of your repository. + +![git_10](assets/git_10.png) + +!!! Info "Why working on branches?" + Branches allow you to add/remove/change exisiting code independently from your main branch. This code can include alphas, betas and different versions of your code. Branches can be used to develop documentation or include different functionalitiets focused on Operating Systems and/or clusters and job schedulers. If needed, you can add these codes to your main branch later using [**pull requests**](00_basics.md#pull-requests). + +To create a new branch select the :octicons-git-branch-16: branch icon (listing the number of branches). This will open the branch page, which will list all of the branches in this repository. + +![git_11](assets/git_11.png) + +Select **New Branch** on the top right. Give the new branch a name of your choice, select the source of code (in this case the only source of code can be the main branch) and select **Create branch**. + +![git_12](assets/git_12.png) + +You can now see the updated list of all your branches. + +![git_13](assets/git_13.png) + +You can now use this new branch to create changes you are not yet ready to put in your main branch. + +!!! warning "Want to delete a branch?" + You can delete a branch from the branch web page by clicking on the :octicons-trash-16: trash can icon. **Beware!** All the changes you've made on that branch will be deleted! + +!!! info "Working on your machine?" + Once you create a branch online, you can change to the desired branch on your machine with `git switch `. Don't forget to push your changes first! + + !!! tip "Pull and Tab" + - Don't forget to perform a `git pull`! + - Don't know your branches? Tab! 
When typing `git switch`, press tab to see the options of all the branches you've created. + +#### Pull Requests + +Pull requests (PR) are proposed changes you can make on a repository. In this specific case, pull requests can be used to merge changes from a branch to another. Pull requests can also come from **forks** of your repository that another user or collaborator has made. + +Assuming you have made changes in your branch (added a file, for example), a pop up will notify you that a branch has pushed some changes. In case you want to merge the branch and the main repository, you can review and merge by clicking the **Compare & pull request** button. However, you may want to wait until more changes are made. + +![git_14](assets/git_14.png) + +Once you are ready to merge the changes onto your main branch, click on the :octicons-git-branch-16: branch icon, and select **New pull request** from the branch you have just made changes. This will open a new page which will list all the changes made showing all files that have been modified, added, or deleted. When you're done reviewing your changes, click **Create pull request**. + +![git_15](assets/git_15.png) + +!!! info "Pay attention to the information on the PR page!" + The PR page will not only show you what changes you've made, but also where the changes are coming from (which branch), as well as reviewers, assigneers, labels and other information necessary when working on a big project. It will also show whether the changes are **Able** to be merged (:material-check:) or not (:octicons-x-16:)! + +Upon createing the pull request, a new page will open which will test whether the changes can be merged automatically. Changes that are not able to be merged usually clash with other changes other collaborators have made - this will require your revision prior to merging the PR! After revision, select **Merge pull request** and **Confirm merge**. 
+ +![git_16](assets/git_16.png) + +Your main repository should now have the files created in your other branch and merged through the PR! + +![git_17](assets/git_17.png) + +--- + +## Introduction to Prompt Engineering + +!!! info "This section is taken from the "[GPT101](https://ua-data7.github.io/introllms/)" CyVerse Workshop" + +A great set of tools that can help you with your own research, if used in the correct way, are the new Large Language Models (LLMs) available publicly. These include [:simple-openai: ChatGPT](https://chat.openai.com/), [:simple-googlebard: Bard](https://bard.google.com/?hl=en) and [:simple-microsoftbing: Bing Chat](https://www.bing.com/new?form=MY0291&OCID=MY0291) (integrated with :material-microsoft-edge: Microsoft Edge). + +### LLMs in 150 words (or less) + +**How they're made**: LLMs work by training on vast amounts of text from the internet. They learn patterns, grammar, and context from this data. When you give them a prompt, they generate text based on what they've learned. Imagine a super-smart autocomplete for text, but it can also create entire paragraphs or articles. + +**How they work**: LLMs don't understand like humans do. They predict what comes next in a sentence using math and probabilities. They don't have thoughts or feelings. They mimic human language but can make mistakes or write nonsense if not guided well. + +**How you can use them**: They're incredibly versatile. You can use them for answering questions, writing essays, coding help, and more. ***But you must be cautious because they can generate biased or false information if not used responsibly***. + +In a nutshell, LLMs are like super-powered text generators trained on the internet's vast knowledge. + +### :simple-openai: Prompt Writing + +GPT Chat asks for a message to begin its conversation. These messages are called "Prompts". + +Begin a conversation with a specific type of prompt. 
This will help narrow the potential range of responses and improve results to subsequent prompts. + +#### Priming + +GPTs do better when provided with "prompt primers". + +Zero-shot unconditioned prompts are likely to return the least specific responses. + +Responses are more likely to be useful when multiple specific output types are defined. + +| Types of Priming | Example | +|------------------|---------| +| Zero (Shot) | "Write five examples of assessments for watershed health." | +| Single | "Write five examples of assessments for watershed health. Here is one example: Geomorphology" | +| Multiple | "Write five examples of assessments for watershed health related to geomorphology, water quality, and species diversity." | + +#### Prompt Structure + +| Role | Task | Format | +|------|------|--------| +| Act as [\[ROLE\]](#role) | Create a [\[TASK\]](#tasks) | ... show as [\[FORMAT\]](#format) | + +Your prompt should specify the role in which ChatGPT responds, what its task is, and the format of how its outputs should be returned. + +A second step to the initial prompt is to [link or chain](#linked-prompts) your subsequent prompts. + +This lesson only covers ChatGPT, but the same prompt techniques can be used in other LLMs. + +#### Role + +Set the role for ChatGPT to play during your session. + +"I want you to act as ..." will establish what type of conversation you are planning to have. + +| Types of Roles | +|---| +| Project Manager | +| Copywriter / Editor | +| Paper Reviewer | +| Teacher / Mentor / Advisor | +| Student / Learner / Participant | +| Software Engineer | +| DevOps Engineer | +| Linux Terminal | +| Python Interpreter | +| Web Browser | + +Examples of roles you might ask for are: a domain science expert, an IT or DevOps engineer, software programmer, journal editor, paper reviewer, mentor, teacher, or student. 
You can even instruct ChatGPT to respond as though it were a Linux [terminal](https://www.engraved.blog/building-a-virtual-machine-inside/), a web browser, a search engine, or language interpreter. + +??? Abstract "Data Scientist" + + Let's try an example prompt with role-playing to help write code in the R programming language. + + ```markdown + I want you to act as a data scientist with complete knowledge of the R language, + the TidyVerse, and RStudio. + + Write the code required to create a new R project environment, + Download and load the Palmer Penguins dataset, and plot regressions of body mass, + bill length, and width for the species of Penguins in the dataset. + + Your response output should be in R and RMarkDown format + with text and code delineated with ``` blocks. + + At the beginning of new file make sure to install any + RStudio system dependencies and R libraries that Palmer Penguins requires. + ``` + + Example can use `GPT-3.5-Turbo` or `GPT-4` + +??? Abstract "Talk to Dead Scientists" + + Try to ask a question with and without Internet access enabled: + + ```markdown + I want you to respond as though you are the mathematician Benoit Mandelbrot + + Explain the relationship of lacunarity and fractal dimension for a self-affine series + + Show your results using mathematical equations in LaTeX or MathJax style format + ``` + Again, there is no guarantee that the results ChatGPT provides are factual, but it does greatly improve the odds that they are relevant to the prompt. Most importantly, these extensions provide citations for their results, allowing you to research the results yourself. + +#### Tasks + +Prompts which return informative responses to questions like "What is ..." or "How does ..." + +Because of ChatGPT's proclivity at making up information, using it without a way of validating the authenticity of its responses makes it less trustworthy than regular search engines. 
+ +| Types of Task | +|---| +| Scientific Article | +| Essay | +| Blog Post | +| Outline | +| Email | +| Cover Letter | +| Recipe | +| Tutorial | +| Lesson Plan | +| Jupyter Notebook | +| Configuration | +| Code | +| Software Script | + +Bing and Bard fill an important space in these types of prompts - they return websites which match the query criterion and allow you to research your own answers. + +There are extension tools for ChatGPT which allows you to prompt with references. + +#### Format + +By default ChatGPT outputs MarkDown syntax text. It can also output software code, and soon images, video, music and sounds. + +| Formats to output | +|---| +| MarkDown Text (\& emojis) | +| List | +| Table | +| HTML | +| CSS | +| Regular Expression | +| CSV / TXT | +| JSON | +| Rich Text | +| Gantt Chart | +| Word Cloud | +| Graphs | +| Spreadsheets | + +You can also ask ChatGPT to explain complex topics or to act as a cook-book step-by-step guide. + +ChatGPT can provide instructional details about how to do specific tasks. + +??? Abstract "Documentation Writer" + + ```markdown + I want you to act as a DIY expert. You will help me develop the skills necessary + to complete simple lab documentation, create tutorials and guides for beginners and experts, + and explain complex concepts in layman's terms using visual techniques, and develop helpful resources. + + I want you to create a tutorial for building and deploying a github.io website using the MkDocs Material Theme + ``` + +### Further Documentation & Questions + +For a more in depth quick start, go to the [GPT 101](https://ua-data7.github.io/introllms/) workshop. + +Documentation of interest: + +- Read the [:simple-openai: ChatGPT Documentation](https://openai.com/blog/chatgpt) +- Read the [:fontawesome-regular-file-pdf: ChatGPT Technical Report](https://doi.org/10.48550/arXiv.2303.08774) +- Read the [:fontawesome-regular-file-pdf: Bard Documentation](https://ai.google/static/documents/google-about-bard.pdf) + +??? 
Tip "How long can or should a prompt be?" + + The length of a prompt is [measured in "tokens"](https://techcommunity.microsoft.com/t5/healthcare-and-life-sciences/unlocking-the-power-of-tokens-optimizing-token-usage-in-gpt-for/ba-p/3826665). A token can represent an individual character, a word, or a subword depending on the specific tokenization approach. A rough estimate for the average number of words in English language per token is `0.75`. + + Currently, ChatGPT version `GPT-3.5turbo` uses up to 2,048 tokens per prompt, GPT-4 and Bing Chat can take up to 32,768 tokens. BARD currently has a limit of 20,000 tokens in a prompt. + + This means that a 2,048 token prompt would be equivalent to about 1,536 words (3-6 pages), and a 32,768 token prompt would be 24,576 words (50-100 pages). + + However, this is only an approximation and may vary depending on the specific text and model. + + What this also means is that current GPT are not capable of reading many PDFs at one time, for example, to do a literature review, or to write a sequel to a novel or book series. + +??? Tip "ChatGPT :simple-awesomelists: Awesome Lists" + + There is an ever changing meta-list of :simple-awesomelists: Awesome lists curated around ChatGPT plugins and extensions. + + [:simple-github: search: `chatgpt+awesome`](https://github.com/search?q=awesome-chatgpt+&type=repositories&s=stars&o=desc) + + Check out lists around: + + [:simple-awesomelists: ChatGPT Prompts](https://github.com/f/awesome-chatgpt-prompts) + + [:simple-awesomelists: ChatGPT Data Science Prompts](https://github.com/travistangvh/ChatGPT-Data-Science-Prompts) + + [:simple-awesomelists: API plugins, extensions, & applications](https://github.com/humanloop/awesome-chatgpt) + + +??? Tip "Access the Internet" + + By default, ChatGPT does not have access to the Internet, and is limited to the time period before September 2021 (as of mid-2023) for its training data time frame. 
+ + There are third-party extensions, like [WebChatGPT](https://www.webchatgpt.app/) which you can install in your browser (Firefox or Chrome), that will extend OpenAI ChatGPT's reach to the internet. + + We presently recommend using [:material-microsoft-bing: Bing Chat](bing.md) with Edge Browser instead of ChatGPT 3.5 for prompting which works with the internet. + + [:simple-google: Bard](bard.md) also has access to the web and limited integration with Google Workspace. diff --git a/00_basics/index.html b/00_basics/index.html new file mode 100644 index 000000000..af8b3a3a4 --- /dev/null +++ b/00_basics/index.html @@ -0,0 +1,2173 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0. The Shell and Git - CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

The Unix Shell, Git, Github and LLMs: an Introduction

+

Requirements

+

Command Line Interfaces (CLI) are found throughout all Operating Systems, however we recommend the use of the Unix CLI. If you have a Unix based machine such as Linux/Ubuntu (or other Linux distributions), macOS, you are ready for the next step. If you use a Windows machine, please install the Windows Subsystem for Linux (WSL) as seen in the Before FOSS Starts section.

+
+

The Unix Shell

+

The computer is a tool. It evolved over the years from being an intricate calculator into an interactive machine with thousands of moving parts that keep us all connected through the Internet. It is now the norm to use a mouse, keyboard, and see flashing images on our screens through the Graphical User Interface (GUI). GUIs are central to the way we interact with computers, however, to best take advantage of the computer's many systems, one needs to learn the Command Line Interface (CLI). The CLI sees the computer stripped down to only a Terminal from where one can run powerful commands executed through the Shell.

+

Whilst the GUI allows for better accessibility to a computer, the CLI allows for advanced usage of one's computer.

+

CLI vs Terminal vs Shell

+
    +
  • CLI (Command Line Interface): an interface that receives commands (and gives output) from a user in the form of lines of text.
  • +
  • Terminal: the text based interface window.
  • +
  • Shell: a computer program and scripting language that presents a CLI which allows you to control your computer using commands.
  • +
+

The Shell sends commands to the computer through the CLI accessible through a Terminal window

+

Things-to-Know About Commands

+
    +
  • Shell commands are used to navigate, visualize, modify (files/folders) and automate (processes), and can only be executed through the shell's terminal window.
  • +
  • For every command, typing man (manual) before the command, will open the manual for said command. +
    $ man ls
    +
      +
    • Doing the above command will result in opening the manual for the ls command. You can exit the man page by pressing q.
    • +
    +
  • +
  • Each command has flags, or options, which are summoned with a -, such as <command> -<flag>. +
    $ ls -a -l -h
    +
      +
    • Doing the above command calls for the -a (all), -l (long), -h (human readable) flags. This causes ls to output a list of all files (including hidden files/folders) with human readable file size (e.g., it will list 3MB instead of 3000000), permissions, creator, and date of creation.
    • +
    • If you do not know what flags are available, you can refer to the man command (or for many tools, use the -h (help) flag).
    • +
    +
  • +
  • . refers to current directory; .. refers to above directory; / is the directory separator; ~ indicates the home directory. +
    $ ls .            # lists files and folders in the current directory
    +$ ls ..           # lists files and folders in the above directory
    +$ ls ~            # lists files and folders in the home directory
    +$ ls ~/Documents  # lists files and folders in Documents (a folder present in the home directory)
    +
  • +
+

Introductory Shell Commands

+

The following are introductory commands necessary when interacting with a computer through the Shell. These will help you orient, create and delete files. Most of this material is explained in more detail in the Carpentries' Shell Module. Visit the Carpentries' website for a more in-depth tutorial.

+
+

A short tutorial introducing the Shell

+
Here below are quick explanations of a few elementary commands that will help you orient and navigate your files and folders through the Shell. If you would like to follow along the explanations for each command, feel free to download and unzip the [shell-lesson-data.zip](https://swcarpentry.github.io/shell-novice/data/shell-lesson-data.zip) file from the [Shell's Carpentry module](https://swcarpentry.github.io/shell-novice/setup.html).
+
+??? question "Don't have access to a GUI?"
+    Following along on a machine with no access to a GUI? Execute the following commands:
+    ```
+    $ sudo apt install unzip
+    $ wget https://swcarpentry.github.io/shell-novice/data/shell-lesson-data.zip
+    $ unzip shell-lesson-data.zip
+    ```
+
+
+ + + + + + + + + + + + + + + + + + + + + + +
CommandExplanation
pwdprint working directory
lslist content of folder
cdchange directory
+

By typing pwd, the current working directory is printed.

+
$ pwd
+/mnt/d/
+
+

We can then use ls to see the contents of the current directory. By using the -F flag (ls -F) we can also see the type of file. Note: an asterisk (*) at the end of the object will denote a file, whilst a slash (/) will denote a folder.

+
$ ls -F 
+shell-lesson-data/   shell-lesson-data.zip*
+
+

We can then move inside the folder of our choice doing cd. Doing ls following the opening of the folder of choice, will show the contents of the folder you just moved in. Feel free to explore the contents of the folders by using cd and ls.

+
$ cd shell-lesson-data
+$ ls -F
+
+exercise-data/  north-pacific-gyre/
+
+$ ls -F exercise-data/
+
+animal-counts/  creatures/  numbers.txt*  proteins/  writing/
+
+
+

Use the Tab key to autocomplete

+
You do not need to type the entire name of a folder or file. By using the tab key, the Shell will autocomplete the name of the files or folders. For example, typing the following
+
+```
+$ ls -F exer
+```
+
+and pressing the tab key, will result in autocompletion.
+
+```
+$ ls -F exercise-data/
+```
+
+You can then press tab twice, to print a list of the contents of the folder.
+
+```
+$ ls -F exercise-data/
+animal-counts/ creatures/     numbers.txt    proteins/      writing/ 
+```
+
+
+

Working with Files and Directories

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CommandExplanation
mkdirmake a directory
touchcreate empty file
nano or vimtext editors
mvmove command
cpcopy command
rmremove command
+

Return to shell-lesson-data, and create a directory with mkdir <name of folder>.

+
$ mkdir my_folder
+$ ls -F
+exercise-data/  my_folder/  north-pacific-gyre/
+
+

Notice the new my_folder directory.

+
+

Naming your files

+
It is strongly suggested that you avoid using spaces when naming your files. When using the Shell to communicate with your machine, a space can cause errors when loading or transferring files. Instead, use dashes (`-`), underscores (`_`), periods (`.`) and CamelCase when naming your files.
+
+Acceptable naming:
+```
+$ mkdir my_personal_folder
+$ mkdir my_personal-folder
+$ mkdir MyPersonal.Folder
+```
+??? Question
+    What do you think will happen if you attempt creating a folder by typing spaces?
+
+    ??? Success "Solution"
+        You will obtain as many folders as typed words!
+        ```
+        $ mkdir my folder
+        $ ls -F
+        exercise-data/  folder/  my/  north-pacific-gyre/
+        ```
+        Notice the two folders `my` and `folder`.
+
+
+

Create an empty file with touch <name of file>.

+
$ touch new_file
+
+

touch will create an empty file, it is up to you to populate it using whichever text editor you prefer. Refer to the carpentries material to know more about nano and its functionalities (link).

+
+

Tip

+
You can also use your text editor to look at the contents of your files!
+
+
+

Use mv <name of file or folder you want to move> <name of destination folder> to move your newly created file to the directory you created previously (you can then use ls to check if you successfully moved the file).

+

$ ls -F
+exercise-data/  new_file*  my_folder/  north-pacific-gyre/
+
+$ mv new_file my_folder/
+$ ls -F
+exercise-data/  my_folder/  north-pacific-gyre/
+
+$ ls -F my_folder/
+new_file*
+
+mv can also be used to rename a file or folder with mv <name of file or folder you want to change> <new name>.

+
$ cd my_folder/
+$ mv new_file my_file
+$ ls -F
+my_file*
+
+

cp is the command to copy a file with the syntax cp <name of file you want to copy> <name of copy file>

+
$ cp my_file copy_my_file
+$ ls -F 
+copy_my_file*  my_file*
+
+
+

Copying folders

+
To copy folders and the content of these folders, you will have to use the `-r` flag (recursive) for `cp` in the following manner `cp -r <name of folder you want to copy> <name of copy folder>` (following example is from the `shell-lesson-data/` directory).
+```
+$ cp -r my_folder/ copy_my_folder
+$ ls -F
+copy_my_folder/  exercise-data/  my_folder/  north-pacific-gyre/
+
+$ ls -F my_folder/
+copy_my_file*  my_file*
+
+$ ls -F copy_my_folder/
+copy_my_file*  my_file*
+```
+
+
+

To remove an unwanted file, use rm <name of file to remove>.

+
$ rm copy_my_file
+$ ls -F 
+my_file
+
+
+

Removing folders

+
Same as the "Copying Folders" note, you have to use the `-r` flag to remove a folder `rm -r <name of folder you want to remove>` (following example is from the `shell-lesson-data/` directory).
+```
+$ rm -r copy_my_folder/
+$ ls -F
+exercise-data/  my_folder/  north-pacific-gyre/
+```
+
+
+

Introductory Remarks

+

The commands listed above are to help you better understand directories and files. There is a lot more that one can accomplish when communicating with your computer through the Shell. In case you want to know more, here are some useful links you can visit:

+ +
+

Git and Github

+

The concept of version control will be touched on in more depth later on in FOSS, however it is important to know the basics of Git and GitHub.

+
    +
  • +

    Git:

    +
      +
    • First developed in 2005, git is a version control software that allows users to make changes and add versions to their code.
    • +
    • Changes and versions are saved locally.
    • +
    • Accessible through the Shell.
    • +
    +
  • +
  • +

    GitHub:

    +
      +
    • First launched in 2008, its main focus is hosting and sharing code.
    • +
    • Changes and versions are saved online (requires an account).
    • +
    • Mainly administered through the web (it also has a desktop app).
    • +
    • Code can be cloned to your computer, changes can be pulled, committed and pushed.
    • +
    +
  • +
+
+

The O in FOSS

+
FOSS stands for Foundational **Open** Science Skills: how many times have you worked on your code just to hit a bottleneck and found a solution on Stack Overflow? How many times have you found links that bring you to a GitHub repository with the exact snippet of code you needed?
+
+The beauty of the **O** is that it makes Science and its code available for all through the internet, sharing ideas and solutions for all.
+
+!!! Warning "Licences"
+        Beware of what code you use and replicate, as a complete GitHub repository comes with a **licence**. Different licences allow the code to be accessed and shared in different methods, therefore always exercise care when looking at other people's code.
+
+
+

Introducing GitHub

+

Since we are talking about making science accessible, we invite you to use GitHub to save and share your code. Please start by creating a GitHub account at https://github.com/.

+

Repositories

+

Repositories are where your code is stored. A suggestion is to have one repository for one project.

+

You can create repositories by clicking on the Repositories tab, and then clicking New.

+

git_1

+

Here, you can choose the name of your own repository, choose to make it private or public, and add a README and a licence. It is strongly recommended that you choose to add an empty README file.

+

git_2

+
+

So, why a README?

+
There are two main reasons why you would like a README file:
+
+1. It adds structure to your repository *automatically* - otherwise you would need to create said structure by yourself (not recommended for beginners).
+2. It is the "default" file that GitHub reads upon opening the repository. It can be treated as the go-to file that explains what the repository is for, what each file does, how to cite your research, amongst other things.
+
+
+
+

Adding a Licence

+
+As discussed previously, the addition of a licence can heavily contribute to the shareability of your code. Make sure that whichever licence you choose is in line with your principles as well as your project's. GitHub comes with a list of licences which you can review. It is also common to choose a licence later on!
+
+
+

Ultimately, your new repository should look like the following screenshot. Notice the LICENCE document and the README.md

+

git_3

+
+

The Markdown Extension (.md)

+
Markdown is a lightweight markup language for creating formatted text using a plain-text editor well widespread throughout text files on the web. It uses symbols (*~-#`) for syntaxing text, and it is what GitHub (and this website!) use to format text. You can read more on Markdown on the [Markdown Guide](https://www.markdownguide.org/).
+
+
+

Adding and Modifying Code

+

GitHub allows you to add and modify code in two ways: through the online portal (the webpage you're seeing) and on your computer.

+
Adding Code through the GitHub web page
+

Adding code to your repository through the web page is suggested if what you want to add is simple (Like a README file!).

+
    +
  • Click the Add File button, which will allow you to either create a new file, or upload files from your computer. Select Create New File.
  • +
  • The editing page will open: choose a name and an extension on the top of the page.
  • +
  • On the editing page you can modify code as you see necessary (writing, pasting)
  • +
  • git_05
      +
    • you can also see your changes (if formatted) with the preview function (with the Preview button).
    • +
    +
  • +
  • To "Save" your changes, you will need to commit your changes:
      +
    • navigate at the bottom of the page, specify your commit with a name and add a description if necessary.
    • +
    +
  • +
  • git_06
  • +
  • You will be able to see your newly created file on your repository home after committing your changes.
  • +
+
+

Committing changes

+
**Committing** is the term used for *saving* changes you've made to your code. Each **commit** can be accessed within the GitHub web interface, which will show you the code prior and after the changes you've made. To see a list of all commits you made, click on the :fontawesome-solid-clock-rotate-left: icon under the **Code** button.
+
+- You can see from the picture below the lines that have been removed (in red), and the lines that have been added (in green).
+![git_07](assets/git_7.png)
+
+- Additionally, you can also see the full list of commits made to the file or repository.
+![git_08](assets/git_8.png)
+
+
+
Adding Code locally
+

Adding code locally is a more complex than adding code through the web page, but it allows for better control on what files you commit.

+
    +
  • To add or modify code locally, you need to clone the repository on your computer. This requires that you have git installed on your machine; if you do not have git installed, use the following commands: +
    $ sudo apt-get install -y git-all
    +
  • +
  • You can then clone the repository by clicking on the Code button, and copying the link shown
  • +
  • git_04
  • +
  • On your machine, open a terminal window and type the following command: +
    $ git clone <repository address>     # Replace <repository address> with the link you copied such as below
    +
    +$ git clone https://github.com/CosiMichele/3_git_tutorial.git
    +Cloning into 'foss23_git_tutorial'...
    +remote: Enumerating objects: 13, done.
    +remote: Counting objects: 100% (13/13), done.
    +remote: Compressing objects: 100% (12/12), done.
    +remote: Total 13 (delta 5), reused 0 (delta 0), pack-reused 0
    +Unpacking objects: 100% (13/13), 14.47 KiB | 90.00 KiB/s, done.
    +
  • +
  • Your code is now available to you on your machine, and you can add and modify files as needed.
  • +
+

You have modified your code locally, however you still have to push it to the repository. Prior to doing so there are a couple of steps you should do:

+
    +
  • git status: it checks the status of the repository (files that have been modified, deleted, added - from either local or in the online repository)
  • +
  • git pull: it checks and "pulls" changes from the online repository to your local repository. It ensures that you are always updated on the repository files and it can save a lot of time in case there are clashing commits from different users.
  • +
+

To do so:

+
    +
  • Add all files you have modified and want to commit: +
    $ git add .    # Recall that "." (period) stands for all files in a folder 
    +
  • +
  • Commit the changes. When committing changes, you have to add a message (in quotation marks) with the -m flag. This message is a concise and descriptive few words about what you did: +
    $ git commit -m "locally added and modified files"
    +[main 05f0ef6] locally added and modified files
    + 2 files changed, 11 insertions(+), 1 deletion(-)
    + create mode 100644 file_from_local.md
    +
  • +
  • push your changes with push: +
    $ git push
    +Enumerating objects: 6, done.
    +Counting objects: 100% (6/6), done.
    +Delta compression using up to 12 threads
    +Compressing objects: 100% (4/4), done.
    +Writing objects: 100% (4/4), 585 bytes | 32.00 KiB/s, done.
    +Total 4 (delta 0), reused 0 (delta 0)
    +To https://github.com/CosiMichele/foss22_git_tutorial.git
    +   b649de3..05f0ef6  main -> main
    +
  • +
+
+

First time Pushing a commit?

+
GitHub is not going to blindly allow you to push changes to the repo, but it will be asking for you to log in.
+
+- When asked for the user name:
+    - Add the username that you use to login into GitHub
+- When it asks you for the password:
+    - **DO NOT PUT YOUR PASSWORD**, you will require a **token** instead
+    - Generate the token by 
+        - On GitHub, click on your avatar (top right, and navigate to **Settings**)
+        - Scroll down to the bottom of the left hand menu, select **Developer settings**, and then **Personal access tokens**
+        - Now click on **Generate new token** (Enter password if requested)
+        - Choose the length of time for which this token is valid, a note (for example, a reminder of what computer you're using this token on), and all the functionalities attached to it (as this is your private repository, you can select all the functionalities). Scroll to the bottom of the page and click **Generate token**
+        - Once created, the token is going to appear: **copy the token and paste it in the password field in your terminal instead of your password**.
+
+
+

You can now see the changes you made locally on the GitHub repository page.

+

git_09

+

Branching

+

Branching allows you to develop your code whilst in a contained environment separate from your main environment. You can view the list and number of branches on the top of your repository.

+

git_10

+
+

Why working on branches?

+
+Branches allow you to add/remove/change existing code independently from your main branch. This code can include alphas, betas and different versions of your code. Branches can be used to develop documentation or include different functionalities focused on Operating Systems and/or clusters and job schedulers. If needed, you can add these codes to your main branch later using [**pull requests**](00_basics.md#pull-requests).
+
+
+

To create a new branch select the branch icon (listing the number of branches). This will open the branch page, which will list all of the branches in this repository.

+

git_11

+

Select New Branch on the top right. Give the new branch a name of your choice, select the source of code (in this case the only source of code can be the main branch) and select Create branch.

+

git_12

+

You can now see the updated list of all your branches.

+

git_13

+

You can now use this new branch to create changes you are not yet ready to put in your main branch.

+
+

Want to delete a branch?

+
You can delete a branch from the branch web page by clicking on the :octicons-trash-16: trash can icon. **Beware!** All the changes you've made on that branch will be deleted!
+
+
+
+

Working on your machine?

+
Once you create a branch online, you can change to the desired branch on your machine with `git switch <branch>`. Don't forget to push your changes first!
+
+!!! tip "Pull and Tab"
+        - Don't forget to perform a `git pull`!
+        - Don't know your branches? Tab! When typing `git switch`, press tab to see the options of all the branches you've created.
+
+
+

Pull Requests

+

Pull requests (PR) are proposed changes you can make on a repository. In this specific case, pull requests can be used to merge changes from a branch to another. Pull requests can also come from forks of your repository that another user or collaborator has made.

+

Assuming you have made changes in your branch (added a file, for example), a pop up will notify you that a branch has pushed some changes. In case you want to merge the branch and the main repository, you can review and merge by clicking the Compare & pull request button. However, you may want to wait until more changes are made.

+

git_14

+

Once you are ready to merge the changes onto your main branch, click on the branch icon, and select New pull request from the branch you have just made changes. This will open a new page which will list all the changes made showing all files that have been modified, added, or deleted. When you're done reviewing your changes, click Create pull request.

+

git_15

+
+

Pay attention to the information on the PR page!

+
+The PR page will not only show you what changes you've made, but also where the changes are coming from (which branch), as well as reviewers, assignees, labels and other information necessary when working on a big project. It will also show whether the changes are **Able** to be merged (:material-check:) or not (:octicons-x-16:)!
+
+
+

Upon creating the pull request, a new page will open which will test whether the changes can be merged automatically. Changes that are not able to be merged usually clash with other changes other collaborators have made - this will require your revision prior to merging the PR! After revision, select Merge pull request and Confirm merge.

+

git_16

+

Your main repository should now have the files created in your other branch and merged through the PR!

+

git_17

+
+

Introduction to Prompt Engineering

+
+

This section is taken from the "GPT101" CyVerse Workshop

+
+

A great set of tools that can help you with your own research, if used in the correct way, are the new Large Language Models (LLMs) available publicly. These include ChatGPT, Bard and Bing Chat (integrated with Microsoft Edge).

+

LLMs in 150 words (or less)

+

How they're made: LLMs work by training on vast amounts of text from the internet. They learn patterns, grammar, and context from this data. When you give them a prompt, they generate text based on what they've learned. Imagine a super-smart autocomplete for text, but it can also create entire paragraphs or articles.

+

How they work: LLMs don't understand like humans do. They predict what comes next in a sentence using math and probabilities. They don't have thoughts or feelings. They mimic human language but can make mistakes or write nonsense if not guided well.

+

How you can use them: They're incredibly versatile. You can use them for answering questions, writing essays, coding help, and more. But you must be cautious because they can generate biased or false information if not used responsibly.

+

In a nutshell, LLMs are like super-powered text generators trained on the internet's vast knowledge.

+

Prompt Writing

+

GPT Chat asks for a message to begin its conversation. These messages are called "Prompts".

+

Begin a conversation with a specific type of prompt. This will help narrow the potential range of responses and improve results to subsequent prompts.

+

Priming

+

GPTs do better when provided with "prompt primers".

+

Zero-shot unconditioned prompts are likely to return the least specific responses.

+

Responses are more likely to be useful when multiple specific output types are defined.

+ + + + + + + + + + + + + + + + + + + + + +
Types of PrimingExample
Zero (Shot)"Write five examples of assessments for watershed health."
Single"Write five examples of assessments for watershed health. Here is one example: Geomorphology"
Multiple"Write five examples of assessments for watershed health related to geomorphology, water quality, and species diversity."
+

Prompt Structure

+ + + + + + + + + + + + + + + +
RoleTaskFormat
Act as [ROLE]Create a [TASK]... show as [FORMAT]
+

Your prompt should specify the role in which ChatGPT responds, what its task is, and the format of how its outputs should be returned.

+

A second step to the initial prompt is to link or chain your subsequent prompts.

+

This lesson only covers ChatGPT, but the same prompt techniques can be used in other LLMs.

+

Role

+

Set the role for ChatGPT to play during your session.

+

"I want you to act as ..." will establish what type of conversation you are planning to have.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Types of Roles
Project Manager
Copywriter / Editor
Paper Reviewer
Teacher / Mentor / Advisor
Student / Learner / Participant
Software Engineer
DevOps Engineer
Linux Terminal
Python Interpreter
Web Browser
+

Examples of roles you might ask for are: a domain science expert, an IT or DevOps engineer, software programmer, journal editor, paper reviewer, mentor, teacher, or student. You can even instruct ChatGPT to respond as though it were a Linux terminal, a web browser, a search engine, or language interpreter.

+
+Data Scientist +

Let's try an example prompt with role-playing to help write code in the R programming language.

+
I want you to act as a data scientist with complete knowledge of the R language, 
+the TidyVerse, and RStudio. 
+
+Write the code required to create a new R project environment,
+Download and load the Palmer Penguins dataset, and plot regressions of body mass, 
+bill length, and width for the species of Penguins in the dataset. 
+
+Your response output should be in R and RMarkDown format 
+with text and code delineated with ``` blocks.
+
+At the beginning of new file make sure to install any 
+RStudio system dependencies and R libraries that Palmer Penguins requires.
+
+

Example can use GPT-3.5-Turbo or GPT-4

+
+
+Talk to Dead Scientists +

Try to ask a question with and without Internet access enabled:

+

I want you to respond as though you are the mathematician Benoit Mandelbrot
+
+Explain the relationship of lacunarity and fractal dimension for a self-affine series
+
+Show your results using mathematical equations in LaTeX or MathJax style format
+
+Again, there is no guarantee that the results ChatGPT provides are factual, but it does greatly improve the odds that they are relevant to the prompt. Most importantly, these extensions provide citations for their results, allowing you to research the results yourself.

+
+

Tasks

+

Prompts which return informative responses to questions like "What is ..." or "How does ..."

+

Because of ChatGPT's proclivity at making up information, using it without a way of validating the authenticity of its responses makes it less trustworthy than regular search engines.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Types of Task
Scientific Article
Essay
Blog Post
Outline
Email
Cover Letter
Recipe
Tutorial
Lesson Plan
Jupyter Notebook
Configuration
Code
Software Script
+

Bing and Bard fill an important space in these types of prompts - they return websites which match the query criterion and allow you to research your own answers.

+

There are extension tools for ChatGPT which allow you to prompt with references.

+

Format

+

By default ChatGPT outputs MarkDown syntax text. It can also output software code, and soon images, video, music and sounds.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Formats to output
MarkDown Text (\& emojis)
List
Table
HTML
CSS
Regular Expression
CSV / TXT
JSON
Rich Text
Gantt Chart
Word Cloud
Graphs
Spreadsheets
+

You can also ask ChatGPT to explain complex topics or to act as a cook-book step-by-step guide.

+

ChatGPT can provide instructional details about how to do specific tasks.

+
+Documentation Writer +
I want you to act as a DIY expert. You will help me develop the skills necessary 
+to complete simple lab documentation, create tutorials and guides for beginners and experts, 
+and explain complex concepts in layman's terms using visual techniques, and develop helpful resources.
+
+I want you to create a tutorial for building and deploying a github.io website using the MkDocs Material Theme
+
+
+

Further Documentation & Questions

+

For a more in depth quick start, go to the GPT 101 workshop.

+

Documentation of interest:

+ +
+How long can or should a prompt be? +

The length of a prompt is measured in "tokens". A token can represent an individual character, a word, or a subword depending on the specific tokenization approach. A rough estimate for the average number of words in English language per token is 0.75.

+

Currently, ChatGPT version GPT-3.5turbo uses up to 2,048 tokens per prompt, GPT-4 and Bing Chat can take up to 32,768 tokens. BARD currently has a limit of 20,000 tokens in a prompt.

+

This means that a 2,048 token prompt would be equivalent to about 1,536 words (3-6 pages), and a 32,768 token prompt would be 24,576 words (50-100 pages).

+

However, this is only an approximation and may vary depending on the specific text and model.

+

What this also means is that current GPT are not capable of reading many PDFs at one time, for example, to do a literature review, or to write a sequel to a novel or book series.

+
+
+ChatGPT Awesome Lists +

There is an ever changing meta-list of Awesome lists curated around ChatGPT plugins and extensions.

+

search: chatgpt+awesome

+

Check out lists around:

+

ChatGPT Prompts

+

ChatGPT Data Science Prompts

+

API plugins, extensions, & applications

+
+
+Access the Internet +

By default, ChatGPT does not have access to the Internet, and is limited to the time period before September 2021 (as of mid-2023) for its training data time frame.

+

There are third-party extensions, like WebChatGPT which you can install in your browser (Firefox or Chrome), that will extend OpenAI ChatGPT's reach to the internet.

+

We presently recommend using Bing Chat with Edge Browser instead of ChatGPT 3.5 for prompting which works with the internet.

+

Bard also has access to the web and limited integration with Google Workspace.

+
+ +
+
+ + + Last update: + 2023-10-18 + + +
+ + + + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/01_intro_open_sci/01_intro_open_sci.md b/01_intro_open_sci/01_intro_open_sci.md new file mode 100644 index 000000000..345af01f6 --- /dev/null +++ b/01_intro_open_sci/01_intro_open_sci.md @@ -0,0 +1,747 @@ +# Introduction to Open Science + +!!! Success "Learning Objectives" + + After this lesson, you should be able to: + + * Explain what Open Science is + * Explain the components of Open Science + * Describe the behaviors of Open Science + * Explain why Open Science matters in education, research, and society + * Understand the advantages and the challenges to Open Science + * Identify who the practitioners of Open Science are + * Understand the underlying Ethos of Open Science + +## 2023 is the Year of Open Science! + +The White House, joined by 10 federal agencies, and a coalition of more than 85 universities, declared 2023 the [Year of Open Science](https://nasa.github.io/Transform-to-Open-Science/year-of-open-science/){target=_blank}. + + +
+ ![open science](../assets/year_of_open_science.jpg){ width="500" } +
2023 is the year of Open Science
+
+ + +## What is Open Science? + +If you ask a dozen researchers this question, you will probably get just as many answers. + +This means that Open Science isn't necessarily a set of checkboxes you need to tick, but rather a holistic approach to doing science. In that spirit, it can also be useful to think about Open Science as a spectrum, from less to more open. + + +??? Quote "Definitions" + + "Open Science is defined as an inclusive construct that combines various movements and practices aiming to make multilingual scientific knowledge openly available, accessible and reusable for everyone, to increase scientific collaborations and sharing of information for the benefits of science and society, and to open the processes of scientific knowledge creation, evaluation and communication to societal actors beyond the traditional scientific community." - [UNESCO Definition](https://www.unesco.org/en/natural-sciences/open-science){target=_blank} + + - [UNESCO's Recommendation on Open Science](https://unesdoc.unesco.org/ark:/48223/pf0000379949.locale=en){target=_blank} + + "Open Science is the movement to make scientific research (including publications, data, physical samples, and software) and its dissemination accessible to all levels of society, amateur or professional..." [ :material-wikipedia: Wikipedia definition](https://en.wikipedia.org/wiki/Open_science){target=_blank} + + Open and Collaborative Science Network's [Open Science Manifesto](https://ocsdnet.org/manifesto/open-science-manifesto/){target=_blank} + +??? Example "Six Pillars :material-pillar: of Open Science" + + **:material-pillar: Open Access Publications** + + **:material-pillar: Open Data** + + **:material-pillar: Open Educational Resources** + + **:material-pillar: Open Methodology** + + **:material-pillar: Open Peer Review** + + **:material-pillar: Open Source Software** + + ??? Question "Wait, how many pillars :material-pillar: of Open Science Really Are There?" 
+ + The number can be from [4 :material-pillar:](https://narratives.insidehighered.com/four-pillars-of-open-science/){target=_blank} to [8 :material-pillar:](https://www.ucl.ac.uk/library/research-support/open-science/8-pillars-open-science){target=_blank} + +??? Tip "Foster Open Science Diagram" + + [![foster](https://www.fosteropenscience.eu/themes/fosterstrap/images/taxonomies/os_taxonomy.png)](https://www.fosteropenscience.eu/resources){target=_blank} + + Graphic by [Foster Open Science](https://www.fosteropenscience.eu/){target=_blank} + + + +``` mermaid +flowchart LR + +id1([open science]) --> id3([publishing]) & id4([data]) & id5([open source software]) + +id3([publishing]) --> id41([access]) & id42([reviews]) & id43([methods]) & id44([educational resources]) + +id5([open source software]) --> id13([container registries]) & id10([services]) & id101([workflows]) & id12([version control systems]) + +id12([version control systems]) --> id101([workflows]) + +id13([container registries]) --> id101([workflows]) + +id14([public data registry]) --> id101([workflows]) + +id10([services]) --> id101([workflows]) + +id44([educational resources]) --> id21([university libraries]) + +id21([university libraries]) --> id101([workflows]) + +id22([federal data archives]) --> id101([workflows]) + +id4([data]) --> id21([university libraries]) & id22([federal data archives]) & id14([public data registries]) + +id101([workflows]) --> id15([on-premises]) & id16([commercial cloud]) & id17([public cloud]) + +``` + +Mermaid Diagram: Conceptual relationships of Open Science and cyberinfrastructure + +
+ +??? Tip ":dark_sunglasses: Awesome Lists of Open Science" + + Awesome lists were started on GitHub by [Sindre Sorhus](https://sindresorhus.com/){target=_blank} and typically have a badge associated with them [![[Awesome]([https://github.com/sindresorhus/awesome])](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/sindresorhus/awesome){target=_blank} + + (There is even a [Searchable Index](https://awesomelists.top/#/){target=_blank} of Awesome Lists) + + We have created our own [Awesome Open Science List here](https://tyson-swetnam.github.io/awesome-open-science/){target=_blank} which may be valuable to you. + +
+
+ +### **:material-pillar: Open Access Publications** + +[![open access](https://upload.wikimedia.org/wikipedia/commons/f/f3/Open_Access_PLoS.svg){width=300}](https://en.wikipedia.org/wiki/Open_access){target=_blank} + +??? Quote "Definitions" + + "Open access is a publishing model for scholarly communication that makes research information available to readers at no cost, as opposed to the traditional subscription model in which readers have access to scholarly information by paying a subscription (usually via libraries)." -- [OpenAccess.nl](https://www.openaccess.nl/en/what-is-open-access){target=_blank} + +??? Tip "New Open Access Mandates in US" + + The White House Office of Science and Technology (OSTP) has recently released a [policy](https://www.whitehouse.gov/ostp/news-updates/2022/08/25/ostp-issues-guidance-to-make-federally-funded-research-freely-available-without-delay/){target=_blank} stating that tax-payer funded research must by open access by 2026. + +??? Example "Open Access Publishing" + + Major publishers have provided access points for publishing your work + + [AAAS](https://www.science.org/content/page/open-access-aaas){target=_blank} + + [Nature](https://www.nature.com/nature-portfolio/open-access){target=_blank} + + [American Geophysical Union](https://www.agu.org/Publish-with-AGU/Publish/Open-Access){target=_blank} + + [Commonwealth Scientific and Industrial Research Organisation (CSIRO)](https://acsopenscience.org/australia-csiro/){target=_blank} + + [Open Research Europe](https://open-research-europe.ec.europa.eu/){target=_blank} + + [PLOS](https://plos.org/){target=_blank} + + [MDPI](https://www.mdpi.com/){target=_blank} + + [Ecosphere](https://esajournals.onlinelibrary.wiley.com/journal/21508925){target=_blank} + +??? 
Example "Financial Support for Open Access Publishing Fees" + + There are mechanisms for helping to pay for the additional costs of publishing research as open access: + + [SciDevNet](https://www.scidev.net/global/){target=_blank} + + [Health InterNetwork Access to Research Initiative (HINARI)](http://www.emro.who.int/information-resources/hinari/){target=_blank} + + Some institutions offer support for managing publishing costs (check to see if your institution has such support): + + [University of Arizona Open Access Investment Fund](https://new.library.arizona.edu/about/awards/oa-fund){target=_blank} + + [Colorado University at Boulder Open Access Fund](https://www.colorado.edu/libraries/research/open-access/open-access-fund){target=_blank} + + [Max Planck Digital Library](https://group.springernature.com/gb/group/media/press-releases/landmark-agreement-between-springer-nature-and-mpdl/18498782){target=_blank} - German authors can have OA fees in Springer Nature research journals paid for. + + [Bibsam Consortium](https://www.springernature.com/gp/open-research/institutional-agreements/oaforsweden){target=_blank} - Swedish authors can have OA fees in Springer Nature research journals paid for. + + +??? Example "Pre-print Services" + + [ASAPbio Pre-Print Server List](https://asapbio.org/preprint-servers){target=_blank} - ASAPbio is a scientist-driven non-profit promoting transparency and innovation comprehensive list of pre-print servers inthe field of life science communication. + + [ESSOar](https://www.essoar.org/){target=_blank} - Earth and Space Science Open Archive hosted by the American Geophysical Union. + + [Peer Community In (PCI)](https://peercommunityin.org/) a free recommendation process of scientific preprints based on peer reviews + + [OSF.io Preprints](https://osf.io/preprints/){target=_blank} are partnered with numerous projects under the "-rXivs" + + ??? 
Tip "The rXivs" + + [AfricArXiv](https://osf.io/preprints/africarxiv/){target=_blank} + + [AgrirXiv](https://cabidigitallibrary.org/journal/agrirxiv){target=_blank} + + [Arabixiv](https://arabixiv.org/discover){target=_blank} + + [arXiv](https://arxiv.org/){target=_blank} - is a free distribution service and an open-access archive for 2,086,431 scholarly articles in the fields of physics, mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering and systems science, and economics. + + [BioHackrXiv](https://biohackrxiv.org/){target=_blank} + + [BioRxiv](https://www.biorxiv.org/){target=_blank} - is an open access preprint repository for the biological sciences. + + [BodorXiv](https://bodoarxiv.wordpress.com/){target=_blank} + + [EarthArXiv](https://eartharxiv.org/){target=_blank} - is an open access preprint repository for the Earth sciences. + + [EcsArXiv](https://ecsarxiv.org/){target=_blank} - a free preprint service for electrochemistry and solid state science and technology + + [EdArXiv](https://edarxiv.org/){target=_blank} - for the education research community + + [EngrXiv](https://engrxiv.org/){target=_blank} for the engineering community + + [EvoEcoRxiv](https://www.ecoevorxiv.com/){target=_blank} - is an open acccess preprint repository for Evolutionary and Ecological sciences. + + [MediArXiv](https://mediarxiv.com/){target=_blank} for Media, Film, & Communication Studies + + [MedRxiv](https://www.medrxiv.org/){target=_blank} - is an open access preprint repository for Medical sciences. + + [PaleorXiv](https://paleorxiv.org/){target=_blank} - is an open access preprint repository for Paleo Sciences + + [PsyrXiv](https://psyarxiv.com/){target=_blank} - is an open access preprint repository for Psychological sciences. + + [SocArXiv](https://socopen.org/){target=_blank} - is an open access preprint repository for Social sciences. 
+ + [SportrXiv](https://sportrxiv.org/){target=_blank} - is an open access preprint for Sports sciences. + + [ThesisCommons](https://thesiscommons.org/) - open Theses + +
+
+ +### **:material-pillar: Open Data** + +Open Data are a critical aspect of open science. There are three key attributes of Open Data: + +* Availability and accessibility +* Reusability +* Inclusivity + +??? Quote "Definitions" + + “Open data and content can be freely used, modified, and shared by anyone for any purpose” - [The Open Definition](https://opendefinition.org/){target=_blank} + + "Open data is data that can be freely used, re-used and redistributed by anyone - subject only, at most, to the requirement to attribute and sharealike." - [Open Data Handbook](https://opendatahandbook.org/guide/en/what-is-open-data/){target=_blank} + + [:material-wikipedia: Wikipedia definition](https://en.wikipedia.org/wiki/Open_data){target=_blank} + +??? Tip "DIKW Pyramid" + + Data are the basis of our understanding the natural world. The Data-Information-Knowledge-Wisdom (DIKW) pyramid describes for us how data are refined into information and knowledge. + + [![dikw](https://upload.wikimedia.org/wikipedia/commons/0/06/DIKW_Pyramid.svg)](https://en.wikipedia.org/wiki/DIKW_pyramid){target=_blank} + +??? Tip "FAIR & CARE Principles" + + **FAIR Principles** + + In 2016, the [FAIR Guiding Principles](https://www.nature.com/articles/sdata201618) for scientific data management and stewardship were + published in Scientific Data. Read it. + + *Findable* + + - F1. (meta)data are assigned a globally unique and persistent identifier + - F2. data are described with rich metadata (defined by R1 below) + - F3. metadata clearly and explicitly include the identifier of the data it describes + - F4. (meta)data are registered or indexed in a searchable resource + + *Accessible* + + - A1. (meta)data are retrievable by their identifier using a + standardized communications protocol + - A1.1 the protocol is open, free, and universally implementable + - A1.2 the protocol allows for an authentication and authorization + procedure, where necessary + - A2. 
metadata are accessible, even when the data are no longer + available + + *Interoperable* + + - I1. (meta)data use a formal, accessible, shared, and broadly + applicable language for knowledge representation. + - I2. (meta)data use vocabularies that follow FAIR principles + - I3. (meta)data include qualified references to other (meta)data + + *Reusable* + + - R1. meta(data) are richly described with a plurality of accurate + and relevant attributes + - R1.1. (meta)data are released with a clear and accessible data + usage license + - R1.2. (meta)data are associated with detailed provenance + - R1.3. (meta)data meet domain-relevant community standard + + !!! Tip "Open vs. Public vs. FAIR" + + FAIR does not demand that data be open: See one definition of open: + http://opendefinition.org/ + + !!! Question "Why Principles?" + + FAIR is a collection of principles. Ultimately, different + communities within different scientific disciplines must work to + interpret and implement these principles. Because technologies + change quickly, focusing on the desired end result allows FAIR to be + applied to a variety of situations now and in the foreseeable + future. + + **CARE Principles** + + The [CARE Principles](https://www.gida-global.org/care) for Indigenous Data Governance were drafted at the International Data Week and Research Data Alliance Plenary co-hosted event "Indigenous Data Sovereignty Principles for the Governance of Indigenous Data Workshop," 8 November 2018, Gaborone, Botswana. + + *Collective Benefit* + + - C1. For inclusive development and innovation + - C2. For improved governance and citizen engagement + - C3. For equitable outcomes + + **Authority to Control* + + - A1. Recognizing rights and interests + - A2. Data for governance + - A3. Governance of data + + *Responsibility* + + - R1. For positive relationships + - R2. For expanding capability and capacity + - R3. For Indigenous languages and worldviews + + *Ethics* + + - E1. 
For minimizing harm and maximizing benefit + - E2. For justice + - E3. For future use + + **FAIR - TLC** + + *Traceable, Licensed, and Connected* + + - The need for metrics: https://zenodo.org/record/203295#.XkrzTxNKjzI + + **How to get to FAIR?** + + This is a question that only you can answer, that is because it depends + on (among other things) + + 1. Your scientific discipline: Your datatypes and existing standards + for what constitutes acceptable data management will vary. + 2. The extent to which your scientific community has implemented + FAIR: Some disciplines have significant guidelines on FAIR, while + others have not addressed the subject in any concerted way. + 3. Your level of technical skills: Some approaches to implementing + FAIR may require technical skills you may not yet feel comfortable + with. + + While a lot is up to you, the first step is to evaluate how FAIR you + think your data are: + + ??? Question "Exercise" + Thinking about a dataset you work with, complete the ARDC [FAIR assessment](https://ardc.edu.au/resource/fair-data-self-assessment-tool/). + + ??? Note "Resources" + + - [The FAIR Guiding Principles for scientific data management and stewardship]() + - [Wilkinson et al. (2016)](https://doi.org/10.1038/sdata.2016.18){target=_blank} established the guidelines to improve the Findability, Accessibility, Interoperability, and Reuse (FAIR) of digital assets for research. + - [Go-FAIR website](https://www.go-fair.org/fair-principles/){target=_blank} + - [Carroll et al. (2020)](http://doi.org/10.5334/dsj-2020-043){target=_blank} established the CARE Principles for Indigenous Data Governance. [full document :fontawesome-solid-file-pdf:](https://static1.squarespace.com/static/5d3799de845604000199cd24/t/5da9f4479ecab221ce848fb2/1571419335217/CARE+Principles_One+Pagers+FINAL_Oct_17_2019.pdf){target=_blank} + - [Indigenous Data Sovereignty Networks](https://indigenousdatalab.org/networks/){target=_blank} + + !!! 
tip "Connecting FOSS and CARE: [Lydia Jennings](https://nativesoilnerd.com/)" + Lydia was a Data Science Fellow at the University of Arizona, attending FOSS in the Fall of 2022. Since then, Lydia graduated from the University of Arizona's Department of Evironemtal Sciences, and published a paper on the application of the CARE principles to ecology and biodiversity research. Go Lydia! + + Check it out! [Appying the 'CARE Principles for Indigenous Data Governance' to ecology and biodiversity](https://www.nature.com/articles/s41559-023-02161-2), *Nature Ecology & Evolution*, 2023. + +??? Tip "Linked Open Data Cloud" + + [The Linked Open Data Cloud](https://lod-cloud.net/){target=_blank} shows how data are [linked to one another](https://www.w3.org/standards/semanticweb/data){target=_blank} forming the basis of the [semantic web :material-wikipedia:](https://en.wikipedia.org/wiki/Semantic_Web){target=_blank}. + + [![linked open data](https://lod-cloud.net/clouds/lod-cloud.svg)](https://lod-cloud.net/clouds/lod-cloud.svg){target=_blank} + +
+
+ +### **:material-pillar: Open Educational Resources** + +[![open educational resources](https://upload.wikimedia.org/wikipedia/commons/2/20/Global_Open_Educational_Resources_Logo.svg){width=240}](https://www.unesco.org/en/communication-information/open-solutions/open-educational-resources) + +??? Quote "Definitions" + + "Open Educational Resources (OER) are learning, teaching and research materials in any format and medium that reside in the public domain or are under copyright that have been released under an open license, that permit no-cost access, re-use, re-purpose, adaptation and redistribution by others." - [UNESCO](https://www.unesco.org/en/communication-information/open-solutions/open-educational-resources){target=_blank} + + [:material-wikipedia: Wikipedia definition](https://en.wikipedia.org/wiki/Open_educational_resources){target=_blank} + +??? Example "Digital Literacy Organizations" + + [The Carpentries](https://carpentries.org/){target=_blank} - teaches foundational coding and data science skills to researchers worldwide + + [EdX](https://www.edx.org/){target=_blank} - Massively Online Online Courses (not all open) hosted through University of California Berkeley + + [EveryoneOn](https://www.everyoneon.org/ ){target=_blank} - mission is to unlock opportunity by connecting families in underserved communities to affordable internet service and computers, and delivering digital skills trainings + + [ConnectHomeUSA](https://connecthomeusa.org/){target=_blank} - is a movement to bridge the digital divide for HUD-assisted housing residents in the United States under the leadership of national nonprofit EveryoneOn + + [Global Digital Literacy Council](https://www.gdlcouncil.org/){target=_blank} - has dedicated more than 15 years of hard work to the creation and maintenance of worldwide standards in digital literacy + + [IndigiData](https://indigidata.nativebio.org/){target=_blank} - training and engaging tribal undergraduate and graduate students in 
informatics + + [National Digital Equity Center](https://digitalequitycenter.org/about-us/){target=_blank} a 501c3 non-profit, is a nationally recognized organization with a mission to close the digital divide across the United States + + [National Digital Inclusion Allaince](https://www.digitalinclusion.org/){target=_blank} - advances digital equity by supporting community programs and equipping policymakers to act + + [Net Literacy](https://www.netliteracy.org/){target=_blank} + + [Open Educational Resources Commons](https://www.oercommons.org/){target=_blank} + + [Project Pythia](https://projectpythia.org/){target=_blank} is the education working group for Pangeo and is an educational resource for the entire geoscience community + + [Research Bazaar](https://resbaz.github.io/resbaz2021/){target=_blank} - is a worldwide festival promoting the digital literacy emerging at the centre of modern research + + [TechBoomers](https://techboomers.com/){target=_blank} - is an education and discovery website that provides free tutorials of popular websites and Internet-based services in a manner that is accessible to older adults and other digital technology newcomers + +??? Example "Educational Materials" + + [Teach Together](https://teachtogether.tech/en/index.html#){target=_blank} by Greg Wilson + + [DigitalLearn](https://www.digitallearn.org/){target=_blank} + +
+
+ +### **:material-pillar: Open Methodology** + +[![plos open methods](https://plos.org/wp-content/uploads/2021/08/Methods-infographic-option-c-2-edit.png)](https://plos.org/open-science/open-methods/){target=_blank} + +The use of version control systems like [GitHub](https://github.com/search?q=open+science){target=_blank} and [GitLab](https://gitlab.com/explore/projects/topics/Open%20Science){target=_blank} present one of the foremost platforms for sharing open methods for digital research. + +??? Quote "Definitions" + + "An open methodology is simply one which has been described in sufficient detail to allow other researchers to repeat the work and apply it elsewhere." - [Watson (2015)](https://doi.org/10.1186/s13059-015-0669-2){target=_blank} + + "Open Methodology refers to opening up methods that are used by researchers to achieve scientific results and making them publicly available." - [Open Science Network Austria](https://www.oana.at/en/about-open-science){target=_blank} + +??? Example "Protocols and Bench Techniques" + + [BioProtocol](https://bio-protocol.org/Default.aspx){target=_blank} + + [Current Protocols](https://currentprotocols.onlinelibrary.wiley.com/){target=_blank} + + [Gold Biotechnology Protocol list](https://www.goldbio.com/search?q=&type=documentation&documentation_type=protocol){target=_blank} + + [JoVE](https://www.jove.com/){target=_blank} - Journal of Visualized Experiments + + [Nature Protocols](https://www.nature.com/nprot/){target=_blank} + + [OpenWetWare](https://openwetware.org/wiki/Main_Page){target=_blank} + + [Protocol Exchange](https://protocolexchange.researchsquare.com/){target=_blank} + + [Protocols Online](http://www.protocol-online.org/prot/){target=_blank} + + [:material-microscope: Protocols](https://www.protocols.io/){target=_blank} + + [SciGene](http://scigine.com/blog/){target=_blank} + + [Springer Nature Experiments](https://experiments.springernature.com/){target=_blank} + +??? 
Tip "Concept of Preregistration" + + In response to the Reproducibility Crisis, many researchers, particularly in fields like psychology, have begun to advocate for **preregistration** of studies. + + This involves writing out and publishing your entire research plan, from data collection to analysis and publication, for the sake of avoiding practices like [p-hacking](https://en.wikipedia.org/wiki/Data_dredging){target=_blank} or [HARKing](https://en.wikipedia.org/wiki/HARKing){target=_blank}. + + What preregistration also does is make the process of your work more open, including many of the small decisions and tweaks you make to a project that probably wouldn't make it into a manuscript. + + To learn more about preregistration, you can check out the [Open Science Foundation](https://osf.io/){target=_blank}, a project that provides a preregistration platform and other Open Science tools. You can also read this [publication](https://www.pnas.org/doi/10.1073/pnas.1708274114){target=_blank} + +
+
+ +### **:material-pillar: Open Peer Review** + +[![plos open peer review](https://theplosblog.plos.org/wp-content/uploads/sites/6/2020/05/TPR_chart-final-edited-003-scaled.jpg)](https://theplosblog.plos.org/2019/05/plos-journals-now-open-for-published-peer-review/){target=_blank} + +[Pros and Cons of Open Peer Review](https://doi.org/10.1038/6295){target=_blank} + +??? Quote "Definitions" + + Ross-Hellauer et al. (2017) ask [What is Open Peer Review?](https://doi.org/10.12688%2Ff1000research.11369.2){target=_blank} and state that there is no single agreed upon definition + + [:material-wikipedia: Wikipedia's definition](https://en.wikipedia.org/wiki/Open_peer_review){target=_blank} + + A manuscript review process that includes some combination of Open Identities, Open Reports, Open Participation, and even Open Interaction + +??? Tips "Open Peer Review Resources" + + [F1000Research](https://f1000research.com/){target=_blank} the first open research publishing platform. Offering open peer review rapid publication + + [PREreview](https://prereview.org/){target=_blank} provides a space for open peer reviews, targeted toward early career researchers. + + [ASAPbio](https://asapbio.org/){target=_blank} Accelerating Science and Publication in Biology, an open peer review source for biologists and life scientists. + + [PubPeer](https://pubpeer.com/){target=_blank} platform for post-publication of peer reviews. + + [Sciety](https://sciety.org/){target=_blank} platform for evaluating preprints. + +
+
+ +### **:material-pillar: Open Source Software** + +[![](https://upload.wikimedia.org/wikipedia/commons/4/42/Opensource.svg){width=240}](https://opensource.org/){target=_blank} + +??? Quote "Definitions" + + "Open source software is code that is designed to be publicly accessible—anyone can see, modify, and distribute the code as they see fit. Open source software is developed in a decentralized and collaborative way, relying on peer review and community production." - [:material-redhat: Red Hat](https://www.redhat.com/en/topics/open-source/what-is-open-source){target=_blank} + + [:material-open-source-initiative: Open Source Initiative definition](https://opensource.org/osd){target=_blank} + + [:material-wikipedia: Wikipedia definition](https://en.wikipedia.org/wiki/Open-source_software){target=_blank} + +[Awesome list](https://tyson-swetnam.github.io/awesome-open-science/software/){target=_blank} + +## Breakout Discussion 1 + +As you already know, being a scientist requires you to wear many hats, and trying to do Open Science is no different. + +
+ ![venn](https://iiif.elifesciences.org/lax/81075%2Felife-81075-fig2-v1.tif/full/,1500/0/default.jpg){ width="700" } +
[Bernery et al. (2022)](https://doi.org/10.7554/eLife.81075){target=_blank} Figure 2: The positive aspects of doing a PhD.
+
+ + +As we mentioned, Open Science is not a set of boxes you need to check off to be "Certified Open", but an intersecting set of philosophies and approaches, all of which occur on some type of spectrum. + +To get a feel for how Open Science can be multifaceted and different for each researcher, we will do a short breakout group session to discuss what Open Science means to you. + +??? Question "What does Open Science mean to you?" + + ??? Example "Which of the :material-pillar: pillars of Open Science is nearest to your own heart?" + + **:material-pillar: Open Access Publications** + + **:material-pillar: Open Data** + + **:material-pillar: Open Educational Resources** + + **:material-pillar: Open Methodology** + + **:material-pillar: Open Peer Review** + + **:material-pillar: Open Source Software** + + ??? Example "Are any of the :material-pillar: pillars more important than the others?" + + ??? Example "Are there any :material-pillar: pillars not identified that you think should be considered?" + +??? Question "What characteristics might a paper, project, lab group require to qualify as doing *Open Science*" + +??? Question "What are some limitations to you, your lab group, or your domain?" + +--- + +## *WHY* do Open Science? + +There are many reasons to do Open Science, and presumably one or more of them brought you to this workshop. + +Whether you feel an ethical obligation, want to improve the quality of your work, or want to look better to funding agencies, many of the same approaches to Open Science apply. + +A paper from [Bartling & Friesike (2014)](https://doi.org/10.1007/978-3-319-00026-8){target=_blank} posits that there are 5 main schools of thought in Open Science, which represent 5 underlying motivations: + +1. **Democratic school**: primarily concerned with making scholarly work freely available to everyone + +2. 
**Pragmatic school**: primarily concerned with improving the quality of scholarly work by fostering collaboration and improving critiques + +3. **Infrastructure school**: primarily focused on the platforms, tools, and services necessary to conduct efficient research, collaboration, and communication + +4. **Public school**: primarily concerned with societal impact of scholarly work, focusing on engagement with broader public via citizen science, understandable scientific communication, and less formal communication + +5. **Measurement school**: primarily concerned with the existing focus on journal publications as a means of measuring scholarly output, and focused on developing alternative measurements of scientific impact + + +
+ ![fecher_friesike](assets/five_schools.png){ width="700" } +
In [Bartling & Friesike (2014)](https://doi.org/10.1007/978-3-319-00026-8){target=_blank} Open Science: One Term, Five Schools of Thought
+
+ +While many researchers may be motivated by one or more of these aspects, we will not necessarily focus on any of them in particular. If anything, FOSS may be slightly more in the Infrastructure school, because we aim to give you the tools to do Open Science based on your own underlying motivations. + +## Breakout Discussion 2 + +Let's break out into groups again to discuss some of our motivations for doing Open Science. + +??? Question "What motivates you to do Open Science?" + +??? Question "Do you feel that you fall into a particular "school"? If so, which one, and why?" + +??? Question "Are there any motivating factors for doing Open Science that don't fit into this framework?" + +## Ethos of Open Science + +Doing Open Science requires us to understand the ethics of why working with data which do not belong to us is privileged. + +We must also anticipate how these could be re-used [in ways contrary to the interests of humanity](https://www.theverge.com/2022/3/17/22983197/ai-new-possible-chemical-weapons-generative-models-vx){target=_blank}. + +Ensure the use of [Institutional Review Boards (IRB)](https://www.fda.gov/about-fda/center-drug-evaluation-and-research-cder/institutional-review-boards-irbs-and-protection-human-subjects-clinical-trials){target=_blank} or your local ethical committee. 
+ +Areas to consider: + +[![ethics assessment](https://uksa.statisticsauthority.gov.uk/wp-content/uploads/2019/05/Self-Assessment-Map-2.0-1024x888.png)](https://uksa.statisticsauthority.gov.uk/the-authority-board/committees/national-statisticians-advisory-committees-and-panels/national-statisticians-data-ethics-advisory-committee/ethics-self-assessment-tool/){target=_blank} + +Source: [UK Statistics Authority](https://uksa.statisticsauthority.gov.uk/the-authority-board/committees/national-statisticians-advisory-committees-and-panels/national-statisticians-data-ethics-advisory-committee/ethics-self-assessment-tool/){target=_blank} + +* Geolocation (survey, land ownership, parcel data), see [UK Statistics Authority Ethical Considerations](https://uksa.statisticsauthority.gov.uk/publication/ethical-considerations-in-the-use-of-geospatial-data-for-research-and-statistics/pages/1/){target=_blank} +* Personal identification information [US Personal Identifiable Information (PII)](https://www.dol.gov/general/ppii){target=_blank}, [General Data Protection Regulation (GDPR)](https://gdpr.eu/what-is-gdpr){target=_blank} +* Health information [US HIPAA](https://www.hhs.gov/hipaa/index.html){target=_blank} , EU GDPR +* Protected and Endangered Species ([US Endangered Species Act](https://www.epa.gov/laws-regulations/summary-endangered-species-act){target=_blank}) +* Indigenous data sovereignty: [CARE Principles for Indigenous Data Governance](http://doi.org/10.5334/dsj-2020-043){target=_blank} , [Global Indigenous Data Alliance (GIDA)](https://www.gida-global.org/care){target=_blank}, [First Nations OCAP® (Ownership Control Access and Possession)](https://fnigc.ca/ocap-training/){target=_blank}, [Circumpolar Inuit Protocols for Equitable and Ethical Engagement](https://www.arcus.org/arctic-info/archive/33236){target=_blank} +* Artificial intelligence/machine learning [Assessment List Trustworthy AI 
(ALTAI)](https://futurium.ec.europa.eu/en/european-ai-alliance/pages/welcome-altai-portal){target=_blank} from the European AI Alliance + +[:material-wikipedia: "Nothing about us, without us"](https://en.wikipedia.org/wiki/Nothing_About_Us_Without_Us){target=_blank} + +- [Funnel et al. (2019)](http://dx.doi.org/10.1017/S0714980819000291){target=_blank} + +For more information (training): + +[January in Tucson](https://igp.arizona.edu/jit){target=_blank} - intensive education session brings together distinguished faculty in the field of Indigenous governance and Indigenous rights, and gives them the opportunity to teach and hold discussions with Indigenous leaders, practitioners, and community members, and anyone interested in Indigenous affairs. + +Ethics and Data Access (General Information with BioMedical and Life Sciences Data) includes [a legal and ethical checklist lesson for researchers](https://ilias.fraunhofer.de/goto.php?target=fold_15177&client_id=fraunhofer){target=_blank} around "FAIR Plus". + + +## Recommended Open Science Communities + +[![The Turing Way](https://the-turing-way.netlify.app/_static/logo-detail-with-text.svg){width=150}](https://the-turing-way.netlify.app/welcome.html) +[![NASA Transform to Open Science](https://zenodo.org/record/7262104/files/Tops_Badge_Nasa.png){width=150}](https://github.com/nasa/Transform-to-Open-Science) +[![Foster Open Science](https://www.fosteropenscience.eu/images/logos/FOSTER-hires.png){width=300}](https://www.fosteropenscience.eu/) +[![The Carpentries](https://carpentries.org/assets/img/TheCarpentries.svg)](https://carpentries.org/) +[![COS](https://www.cos.io/hubfs/Cos_2020/Images/cos_logo.png){width=200}](https://www.cos.io/) + +[:material-school: Open Scholarship Grassroots Community Networks](https://docs.google.com/spreadsheets/d/1LNF5_bOkRV-RLIF4HYmu-gOemIa4IdfXEer89fM-Vy8/edit#gid=847887324){target=_blank} + +??? 
Info ":fontawesome-solid-earth-europe: International Open Science Networks" + + [Center for Scientific Collaboration and Community Engagement (CSCCE)](https://www.cscce.org/){target=_blank} + + [Center for Open Science (COS)](https://www.cos.io/){target=_blank} + + [Eclipse Science Working Group](https://science.eclipse.org/){target=_blank} + + [eLife](https://elifesciences.org/){target=_blank} + + [NumFocus](https://numfocus.org/){target=_blank} + + [Open Access Working Group](https://sparcopen.org/people/open-access-working-group/){target=_blank} + + [Open Research Funders Group](https://www.orfg.org/) + + [Open Science Foundation](https://osf.io/){target=_blank} + + [Open Science Network](https://www.opensciencenetwork.org/){target=_blank} + + [pyOpenSci](https://www.pyopensci.org/){target=_blank} + + [R OpenSci](https://ropensci.org/){target=_blank} + + [Research Data Alliance (RDA)](https://www.rd-alliance.org/){target=_blank} + + [The Turing Way](https://the-turing-way.netlify.app/welcome){target=_blank} + + [UNESCO Global Open Science Partnership](https://en.unesco.org/science-sustainable-future/open-science/partnership){target=_blank} + + [World Wide Web Consortium (W3C)](https://www.w3.org/){target=_blank} + +??? Info ":fontawesome-solid-earth-americas: US-based Open Science Networks" + + [CI Compass](https://ci-compass.org/){target=_blank} - provides expertise and active support to cyberinfrastructure practitioners at USA NSF Major Facilities in order to accelerate the data lifecycle and ensure the integrity and effectiveness of the cyberinfrastructure upon which research and discovery depend. + + [Earth Science Information Partners (ESIP) Federation](https://www.esipfed.org/){target=_blank} - is a 501(c)(3) nonprofit supported by NASA, NOAA, USGS and 130+ member organizations. + + [Internet2](https://internet2.edu/){target=_blank} - is a community providing cloud solutions, research support, and services tailored for Research and Education. 
+ + [Minority Serving Cyberinfrastructure Consortium (MS-CC)](https://www.ms-cc.org/){target=_blank} envisions a transformational partnership to promote advanced cyberinfrastructure (CI) capabilities on the campuses of Historically Black Colleges and Universities (HBCUs), Hispanic-Serving Institutions (HSIs), Tribal Colleges and Universities (TCUs), and other Minority Serving Institutions (MSIs). + + [NASA Transform to Open Science (TOPS)](https://github.com/nasa/Transform-to-Open-Science){target=_blank} - coordinates efforts designed to rapidly transform agencies, organizations, and communities for Earth Science + + [OpenScapes](https://www.openscapes.org/){target=_blank} - is an approach for doing better science for future us + + [The Quilt](https://www.thequilt.net/){target=_blank} - non-profit regional research and education networks collaborate to develop, deploy and operate advanced cyberinfrastructure that enables innovation in research and education. + +??? Info ":fontawesome-solid-earth-oceania: Oceania Open Science Networks" + + [New Zealand Open Research Network](https://nzorn.netlify.app/) - New Zealand Open Research Network (NZORN) is a collection of researchers and research-associated workers in New Zealand. + + [Australia & New Zealand Open Research Network](https://www.anzopenresearch.org/) - ANZORN is a network of local networks distributed throughout Australia and New Zealand. + +--- + +## Self Assessment + +??? Question "True or False: All research papers published in the top journals, like Science and Nature, are always Open Access?" + + ??? Success "Answer" + + False + + Major Research journals like [Science](https://www.science.org/content/page/open-access-aaas){target=_blank} and [Nature](https://www.nature.com/nature-portfolio/open-access){target=_blank} have an "Open Access" option when a manuscript is accepted, but they charge an extra fee to the authors to make those papers Open Access. 
+ + These [high page costs](https://www.science.org/content/article/9500-nature-journals-will-now-make-your-paper-free-read){target=_blank} are exclusionary to the majority of global scientists who cannot afford to front these costs out of pocket. + + This will soon change, at least in the United States. The [Executive Branch of the federal government recently mandated](https://www.nature.com/articles/d41586-022-02351-1){target=_blank} that future federally funded research be made Open Access after 2026. + + +??? Question "True or False: an article states all of the research data used in the experiments "are available upon request from the corresponding author(s)," meaning the data are "Open"" + + ??? Success "Answer" + + False + + In order for research to be open, the data need to be freely available from a digital repository, like [Data Dryad](https://datadryad.org){target=_blank}, [Zenodo.org](https://zenodo.org){target=_blank}, or [CyVerse](https://cyverse.org/data-commons){target=_blank}. + + Data that are 'available upon request' do not meet the FAIR data principles. + +??? Question "True or False: Online Universities and Data Science Boot Camps like UArizona Online, Coursera, Udemy, etc. promote digital literacy and are Open Educational Resources?" + + ??? Success "Answer" + + False + + These examples are for-profit programs which teach data science and computer programming online. Some may be official through public or private universities and offer credits toward a degree or a certificate. Some of these programs are [known to be predatory](https://www.highereddive.com/news/court-fines-ashford-university-and-zovio-224m-for-misleading-students/620058/){target=_blank}. + + The organizations we have [listed above](#material-pillar-open-educational-resources) are Open Educational Resources - they are free and available to anyone who wants to work with them asynchronously, virtually, or in person. + +??? 
Question "Using a version control system to host the analysis code and computational notebooks, and including these in your Methods section or Supplementary Materials, is an example of an Open Methodology?" + + ??? Success "Answer" + + Yes! + + Using a VCS like GitHub or GitLab is a great step towards making your research more reproducible. + + Ways to improve your open methodology can include documentation of your physical bench work, and even video recordings and step-by-step guides for every part of your project. + +??? Question "You are asked to review a paper for an important journal in your field. The editor asks if you're willing to release your identity to the authors, thereby "signing" your review. Is this an example of "Open Peer Review"?" + + ??? Success "Answer" + + No + + Just because you've given your name to the author(s) of the manuscript, this does not make your review open. + + If the journal later publishes your review alongside the final manuscript, then you will have participated in an Open Review. + +??? Question "You read a paper where the author(s) wrote their own code and licensed it as "Open Source" software for a specific set of scientific tasks which you want to replicate. When you visit their personal website, you find the GitHub repository does not exist (because it's now private). You contact the authors asking for access, but they refuse to share it "due to competing researchers who are seeking to steal their intellectual property". Is the software open source?" + + ??? Success "Answer" + + No + + Just because an author states they have given their software a permissive software license, does not make the software open source. + + Always make certain there is a [LICENSE](https://choosealicense.com/licenses/){target=_blank} associated with any software you find on the internet. 
+ + In order for the software to be open, it must follow the [Open Source Initiative definition](https://opensource.org/osd){target=_blank} diff --git a/01_intro_open_sci/index.html b/01_intro_open_sci/index.html new file mode 100644 index 000000000..f7820833d --- /dev/null +++ b/01_intro_open_sci/index.html @@ -0,0 +1,1795 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1. Open Science - CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Introduction to Open Science

+
+

Learning Objectives

+
After this lesson, you should be able to:
+
+* Explain what Open Science is
+* Explain the components of Open Science
+* Describe the behaviors of Open Science
+* Explain why Open Science matters in education, research, and society
+* Understand the advantages and the challenges to Open Science
+* Identify who the practitioners of Open Science are
+* Understand the underlying Ethos of Open Science
+
+
+

2023 is the Year of Open Science!

+

The White House, joined by 10 federal agencies, and a coalition of more than 85 universities, declared 2023 the Year of Open Science.

+
+

open science +

2023 is the year of Open Science

+
+

What is Open Science?

+

If you ask a dozen researchers this question, you will probably get just as many answers.

+

This means that Open Science isn't necessarily a set of checkboxes you need to tick, but rather a holistic approach to doing science. In that spirit, it can also be useful to think about Open Science as a spectrum, from less to more open.

+
+Definitions +

"Open Science is defined as an inclusive construct that combines various movements and practices aiming to make multilingual scientific knowledge openly available, accessible and reusable for everyone, to increase scientific collaborations and sharing of information for the benefits of science and society, and to open the processes of scientific knowledge creation, evaluation and communication to societal actors beyond the traditional scientific community." - UNESCO Definition

+ +

"Open Science is the movement to make scientific research (including publications, data, physical samples, and software) and its dissemination accessible to all levels of society, amateur or professional..." Wikipedia definition

+

Open and Collaborative Science Network's Open Science Manifesto

+
+
+Six Pillars of Open Science +

Open Access Publications

+

Open Data

+

Open Educational Resources

+

Open Methodology

+

Open Peer Review

+

Open Source Software

+
+Wait, how many pillars of Open Science Really Are There? +
The number can be from [4 :material-pillar:](https://narratives.insidehighered.com/four-pillars-of-open-science/){target=_blank} to [8 :material-pillar:](https://www.ucl.ac.uk/library/research-support/open-science/8-pillars-open-science){target=_blank}
+
+
+
+
+Foster Open Science Diagram +

foster

+
     Graphic by [Foster Open Science](https://www.fosteropenscience.eu/){target=_blank}
+
+
+
flowchart LR
+
+id1([open science]) --> id3([publishing]) & id4([data]) & id5([open source software])
+
+id3([publishing]) -->  id41([access]) & id42([reviews]) & id43([methods]) & id44([educational resources]) 
+
+id5([open source software]) --> id13([container registries]) & id10([services]) & id101([workflows]) & id12([version control systems])
+
+id12([version control systems]) --> id101([workflows])
+
+id13([container registries]) --> id101([workflows])
+
+id14([public data registry]) --> id101([workflows])
+
+id10([services]) --> id101([workflows]) 
+
+id44([educational resources]) --> id21([university libraries])
+
+id21([university libraries]) --> id101([workflows])
+
+id22([federal data archives]) --> id101([workflows]) 
+
+id4([data]) --> id21([university libraries]) & id22([federal data archives]) & id14([public data registries]) 
+
+id101([workflows]) --> id15([on-premises]) & id16([commercial cloud]) & id17([public cloud])
+
+

Mermaid Diagram: Conceptual relationships of Open Science and cyberinfrastructure

+


+
+🕶 Awesome Lists of Open Science +

Awesome lists were started on GitHub by Sindre Sorhus and typically have a badge associated with them [Awesome](https://github.com/sindresorhus/awesome)

+

(There is even a Searchable Index of Awesome Lists)

+

We have created our own Awesome Open Science List here which may be valuable to you.

+
+


+

+

Open Access Publications

+

open access

+
+Definitions +

"Open access is a publishing model for scholarly communication that makes research information available to readers at no cost, as opposed to the traditional subscription model in which readers have access to scholarly information by paying a subscription (usually via libraries)." -- OpenAccess.nl

+
+
+New Open Access Mandates in US +

The White House Office of Science and Technology (OSTP) has recently released a policy stating that tax-payer funded research must by open access by 2026.

+
+
+Open Access Publishing +

Major publishers have provided access points for publishing your work

+

AAAS

+

Nature

+

American Geophysical Union

+

Commonwealth Scientific and Industrial Research Organisation (CSIRO)

+

Open Research Europe

+

PLOS

+

MDPI

+

Ecosphere

+
+
+Financial Support for Open Access Publishing Fees +

There are mechanisms for helping to pay for the additional costs of publishing research as open access:

+

SciDevNet

+

Health InterNetwork Access to Research Initiative (HINARI)

+

Some institutions offer support for managing publishing costs (check to see if your institution has such support):

+

University of Arizona Open Access Investment Fund

+

University of Colorado Boulder Open Access Fund

+

Max Planck Digital Library - German authors can have OA fees in Springer Nature research journals paid for.

+

Bibsam Consortium - Swedish authors can have OA fees in Springer Nature research journals paid for.

+
+
+Pre-print Services +

ASAPbio Pre-Print Server List - ASAPbio is a scientist-driven non-profit promoting transparency and innovation; a comprehensive list of pre-print servers in the field of life science communication.

+

ESSOar - Earth and Space Science Open Archive hosted by the American Geophysical Union.

+

Peer Community In (PCI) a free recommendation process of scientific preprints based on peer reviews

+

OSF.io Preprints are partnered with numerous projects under the "-rXivs"

+
+The rXivs +

AfricArXiv

+

AgrirXiv

+

Arabixiv

+

arXiv - is a free distribution service and an open-access archive for 2,086,431 scholarly articles in the fields of physics, mathematics, computer science, quantitative biology, quantitative finance, statistics, electrical engineering and systems science, and economics.

+

BioHackrXiv

+

BioRxiv - is an open access preprint repository for the biological sciences.

+

BodorXiv

+

EarthArXiv - is an open access preprint repository for the Earth sciences.

+

EcsArXiv - a free preprint service for electrochemistry and solid state science and technology

+

EdArXiv - for the education research community

+

EngrXiv for the engineering community

+

EvoEcoRxiv - is an open access preprint repository for Evolutionary and Ecological sciences.

+

MediArXiv for Media, Film, & Communication Studies

+

MedRxiv - is an open access preprint repository for Medical sciences.

+

PaleorXiv - is an open access preprint repository for Paleo Sciences

+

PsyrXiv - is an open access preprint repository for Psychological sciences.

+

SocArXiv - is an open access preprint repository for Social sciences.

+

SportrXiv - is an open access preprint for Sports sciences.

+

ThesisCommons - open Theses

+
+
+


+

+

Open Data

+

Open Data are a critical aspect of open science. There are three key attributes of Open Data:

+
    +
  • Availability and accessibility
  • +
  • Reusability
  • +
  • Inclusivity
  • +
+
+Definitions +

“Open data and content can be freely used, modified, and shared by anyone for any purpose” - The Open Definition

+

"Open data is data that can be freely used, re-used and redistributed by anyone - subject only, at most, to the requirement to attribute and sharealike." - Open Data Handbook

+

Wikipedia definition

+
+
+DIKW Pyramid +

Data are the basis of our understanding the natural world. The Data-Information-Knowledge-Wisdom (DIKW) pyramid describes for us how data are refined into information and knowledge.

+

dikw

+
+
+FAIR & CARE Principles +

FAIR Principles

+

In 2016, the FAIR Guiding Principles for scientific data management and stewardship were +published in Scientific Data. Read it.

+

Findable

+
    +
  • F1. (meta)data are assigned a globally unique and persistent identifier
  • +
  • F2. data are described with rich metadata (defined by R1 below)
  • +
  • F3. metadata clearly and explicitly include the identifier of the data it describes
  • +
  • F4. (meta)data are registered or indexed in a searchable resource
  • +
+

Accessible

+
    +
  • A1. (meta)data are retrievable by their identifier using a + standardized communications protocol
  • +
  • A1.1 the protocol is open, free, and universally implementable
  • +
  • A1.2 the protocol allows for an authentication and authorization + procedure, where necessary
  • +
  • A2. metadata are accessible, even when the data are no longer + available
  • +
+

Interoperable

+
    +
  • I1. (meta)data use a formal, accessible, shared, and broadly + applicable language for knowledge representation.
  • +
  • I2. (meta)data use vocabularies that follow FAIR principles
  • +
  • I3. (meta)data include qualified references to other (meta)data
  • +
+

Reusable

+
    +
  • R1. meta(data) are richly described with a plurality of accurate + and relevant attributes
  • +
  • R1.1. (meta)data are released with a clear and accessible data + usage license
  • +
  • R1.2. (meta)data are associated with detailed provenance
  • +
  • R1.3. (meta)data meet domain-relevant community standards
  • +
+
+

Open vs. Public vs. FAIR

+
FAIR does not demand that data be open: See one definition of open:
+http://opendefinition.org/
+
+
+
+

Why Principles?

+
FAIR is a collection of principles. Ultimately, different
+communities within different scientific disciplines must work to
+interpret and implement these principles. Because technologies
+change quickly, focusing on the desired end result allows FAIR to be
+applied to a variety of situations now and in the foreseeable
+future.
+
+
+

CARE Principles

+

The CARE Principles for Indigenous Data Governance were drafted at the International Data Week and Research Data Alliance Plenary co-hosted event "Indigenous Data Sovereignty Principles for the Governance of Indigenous Data Workshop," 8 November 2018, Gaborone, Botswana.

+

Collective Benefit

+
    +
  • C1. For inclusive development and innovation
  • +
  • C2. For improved governance and citizen engagement
  • +
  • C3. For equitable outcomes
  • +
+

Authority to Control

+
    +
  • A1. Recognizing rights and interests
  • +
  • A2. Data for governance
  • +
  • A3. Governance of data
  • +
+

Responsibility

+
    +
  • R1. For positive relationships
  • +
  • R2. For expanding capability and capacity
  • +
  • R3. For Indigenous languages and worldviews
  • +
+

Ethics

+
    +
  • E1. For minimizing harm and maximizing benefit
  • +
  • E2. For justice
  • +
  • E3. For future use
  • +
+

FAIR - TLC

+

Traceable, Licensed, and Connected

+ +

How to get to FAIR?

+

This is a question that only you can answer, that is because it depends +on (among other things)

+
    +
  1. Your scientific discipline: Your datatypes and existing standards + for what constitutes acceptable data management will vary.
  2. +
  3. The extent to which your scientific community has implemented + FAIR: Some disciplines have significant guidelines on FAIR, while + others have not addressed the subject in any concerted way.
  4. +
  5. Your level of technical skills: Some approaches to implementing + FAIR may require technical skills you may not yet feel comfortable + with.
  6. +
+

While a lot is up to you, the first step is to evaluate how FAIR you +think your data are:

+
+Exercise +

Thinking about a dataset you work with, complete the ARDC FAIR assessment.

+
+
+Resources + +
+
+

Connecting FOSS and CARE: Lydia Jennings

+

Lydia was a Data Science Fellow at the University of Arizona, attending FOSS in the Fall of 2022. Since then, Lydia graduated from the University of Arizona's Department of Environmental Sciences, and published a paper on the application of the CARE principles to ecology and biodiversity research. Go Lydia!

+

Check it out! Applying the 'CARE Principles for Indigenous Data Governance' to ecology and biodiversity, Nature Ecology & Evolution, 2023.

+
+
+
+Linked Open Data Cloud +

The Linked Open Data Cloud shows how data are linked to one another forming the basis of the semantic web .

+

linked open data

+
+


+

+

Open Educational Resources

+

open educational resources

+
+Definitions +

"Open Educational Resources (OER) are learning, teaching and research materials in any format and medium that reside in the public domain or are under copyright that have been released under an open license, that permit no-cost access, re-use, re-purpose, adaptation and redistribution by others." - UNESCO

+

Wikipedia definition

+
+
+Digital Literacy Organizations +

The Carpentries - teaches foundational coding and data science skills to researchers worldwide

+

EdX - Massive Open Online Courses (not all open) hosted through University of California Berkeley

+

EveryoneOn - mission is to unlock opportunity by connecting families in underserved communities to affordable internet service and computers, and delivering digital skills trainings

+

ConnectHomeUSA - is a movement to bridge the digital divide for HUD-assisted housing residents in the United States under the leadership of national nonprofit EveryoneOn

+

Global Digital Literacy Council - has dedicated more than 15 years of hard work to the creation and maintenance of worldwide standards in digital literacy

+

IndigiData - training and engaging tribal undergraduate and graduate students in informatics

+

National Digital Equity Center a 501c3 non-profit, is a nationally recognized organization with a mission to close the digital divide across the United States

+

National Digital Inclusion Alliance - advances digital equity by supporting community programs and equipping policymakers to act

+

Net Literacy

+

Open Educational Resources Commons

+

Project Pythia is the education working group for Pangeo and is an educational resource for the entire geoscience community

+

Research Bazaar - is a worldwide festival promoting the digital literacy emerging at the centre of modern research

+

TechBoomers - is an education and discovery website that provides free tutorials of popular websites and Internet-based services in a manner that is accessible to older adults and other digital technology newcomers

+
+
+Educational Materials +

Teach Together by Greg Wilson

+

DigitalLearn

+
+


+

+

Open Methodology

+

plos open methods

+

The use of version control systems like GitHub and GitLab present one of the foremost platforms for sharing open methods for digital research.

+
+Definitions +

"An open methodology is simply one which has been described in sufficient detail to allow other researchers to repeat the work and apply it elsewhere." - Watson (2015)

+

"Open Methodology refers to opening up methods that are used by researchers to achieve scientific results and making them publicly available." - Open Science Network Austria

+
+
+Protocols and Bench Techniques +

BioProtocol

+

Current Protocols

+

Gold Biotechnology Protocol list

+

JoVE - Journal of Visualized Experiments

+

Nature Protocols

+

OpenWetWare

+

Protocol Exchange

+

Protocols Online

+

Protocols

+

SciGene

+

Springer Nature Experiments

+
+
+Concept of Preregistration +

In response to the Reproducibility Crisis, many researchers, particularly in fields like psychology, have begun to advocate for preregistration of studies.

+

This involves writing out and publishing your entire research plan, from data collection to analysis and publication, for the sake of avoiding practices like p-hacking or HARKing.

+

What preregistration also does is make the process of your work more open, including many of the small decisions and tweaks you make to a project that probably wouldn't make it into a manuscript.

+

To learn more about preregistration, you can check out the Open Science Foundation, a project that provides a preregistration platform and other Open Science tools. You can also read this publication

+
+


+

+

Open Peer Review

+

plos open peer review

+

Pros and Cons of Open Peer Review

+
+Definitions +

Ross-Hellauer et al. (2017) ask What is Open Peer Review? and state that there is no single agreed upon definition

+

Wikipedia's definition

+

A manuscript review process that includes some combination of Open Identities, Open Reports, Open Participation, and even Open Interaction

+
+
+Open Peer Review Resources +

F1000Research - the first open research publishing platform, offering open peer review and rapid publication

+

PREreview provides a space for open peer reviews, targeted toward early career researchers.

+

ASAPbio Accelerating Science and Publication in Biology, an open peer review source for biologists and life scientists.

+

PubPeer platform for post-publication of peer reviews.

+

Sciety platform for evaluating preprints.

+
+


+

+

Open Source Software

+

+
+Definitions +

"Open source software is code that is designed to be publicly accessible—anyone can see, modify, and distribute the code as they see fit. Open source software is developed in a decentralized and collaborative way, relying on peer review and community production." - Red Hat

+

Open Source Initiative definition

+

Wikipedia definition

+
+

Awesome list

+

Breakout Discussion 1

+

As you already know, being a scientist requires you to wear many hats, and trying to do Open Science is no different.

+
+

venn +

Bernery et al. (2022) Figure 2: The positive aspects of doing a PhD.

+
+

As we mentioned, Open Science is not a set of boxes you need to check off to be "Certified Open", but an intersecting set of philosophies and approaches, all of which occur on some type of spectrum.

+

To get a feel for how Open Science can be multifaceted and different for each researcher, we will do a short breakout group session to discuss what Open Science means to you.

+
+What does Open Science mean to you? +
+Which of the pillars of Open Science is nearest to your own heart? +

Open Access Publications

+

Open Data

+

Open Educational Resources

+

Open Methodology

+

Open Peer Review

+

Open Source Software

+
+
+Are any of the pillars more important than the others? +
+
+Are there any pillars not identified that you think should be considered? +
+
+
+What characteristics might a paper, project, lab group require to qualify as doing Open Science +
+
+What are some limitations to you, your lab group, or your domain? +
+
+

WHY do Open Science?

+

There are many reasons to do Open Science, and presumably one or more of them brought you to this workshop.

+

Whether you feel an ethical obligation, want to improve the quality of your work, or want to look better to funding agencies, many of the same approaches to Open Science apply.

+

A paper from Bartling & Friesike (2014) posits that there are 5 main schools of thought in Open Science, which represent 5 underlying motivations:

+
    +
  1. +

    Democratic school: primarily concerned with making scholarly work freely available to everyone

    +
  2. +
  3. +

    Pragmatic school: primarily concerned with improving the quality of scholarly work by fostering collaboration and improving critiques

    +
  4. +
  5. +

    Infrastructure school: primarily focused on the platforms, tools, and services necessary to conduct efficient research, collaboration, and communication

    +
  6. +
  7. +

    Public school: primarily concerned with societal impact of scholarly work, focusing on engagement with broader public via citizen science, understandable scientific communication, and less formal communication

    +
  8. +
  9. +

    Measurement school: primarily concerned with the existing focus on journal publications as a means of measuring scholarly output, and focused on developing alternative measurements of scientific impact

    +
  10. +
+
+

fecher_friesike +

In Bartling & Friesike (2014) Open Science: One Term, Five Schools of Thought

+
+

While many researchers may be motivated by one or more of these aspects, we will not necessarily focus on any of them in particular. If anything, FOSS may be slightly more in the Infrastructure school, because we aim to give you the tools to do Open Science based on your own underlying motivations.

+

Breakout Discussion 2

+

Let's break out into groups again to discuss some of our motivations for doing Open Science.

+
+What motivates you to do Open Science? +
+
+Do you feel that you fall into a particular "school"? If so, which one, and why? +
+
+Are there any motivating factors for doing Open Science that don't fit into this framework? +
+

Ethos of Open Science

+

Doing Open Science requires us to understand the ethics of why working with data which do not belong to us is privileged.

+

We must also anticipate how these could be re-used in ways contrary to the interests of humanity.

+

Ensure the use of Institutional Review Boards (IRB) or your local ethical committee.

+

Areas to consider:

+

ethics assessment

+

Source: UK Statistics Authority

+ +

"Nothing about us, without us"

+ +

For more information (training):

+

January in Tucson - intensive education session brings together distinguished faculty in the field of Indigenous governance and Indigenous rights, and gives them the opportunity to teach and hold discussions with Indigenous leaders, practitioners, and community members, and anyone interested in Indigenous affairs.

+

Ethics and Data Access (General Information with BioMedical and Life Sciences Data) includes a legal and ethical checklist lesson for researchers around "FAIR Plus".

+ +

The Turing Way +NASA Transform to Open Science +Foster Open Science +The Carpentries +COS

+

Open Scholarship Grassroots Community Networks

+
+ International Open Science Networks +

Center for Scientific Collaboration and Community Engagement (CSCCE)

+

Center for Open Science (COS)

+

Eclipse Science Working Group

+

eLife

+

NumFocus

+

Open Access Working Group

+

Open Research Funders Group

+

Open Science Foundation

+

Open Science Network

+

pyOpenSci

+

R OpenSci

+

Research Data Alliance (RDA)

+

The Turing Way

+

UNESCO Global Open Science Partnership

+

World Wide Web Consortium (W3C)

+
+
+ US-based Open Science Networks +

CI Compass - provides expertise and active support to cyberinfrastructure practitioners at USA NSF Major Facilities in order to accelerate the data lifecycle and ensure the integrity and effectiveness of the cyberinfrastructure upon which research and discovery depend.

+

Earth Science Information Partners (ESIP) Federation - is a 501(c)(3) nonprofit supported by NASA, NOAA, USGS and 130+ member organizations.

+

Internet2 - is a community providing cloud solutions, research support, and services tailored for Research and Education.

+

Minority Serving Cyberinfrastructure Consortium (MS-CC) envisions a transformational partnership to promote advanced cyberinfrastructure (CI) capabilities on the campuses of Historically Black Colleges and Universities (HBCUs), Hispanic-Serving Institutions (HSIs), Tribal Colleges and Universities (TCUs), and other Minority Serving Institutions (MSIs).

+

NASA Transform to Open Science (TOPS) - coordinates efforts designed to rapidly transform agencies, organizations, and communities for Earth Science

+

OpenScapes - is an approach for doing better science for future us

+

The Quilt - non-profit regional research and education networks collaborate to develop, deploy and operate advanced cyberinfrastructure that enables innovation in research and education.

+
+
+ Oceania Open Science Networks +

New Zealand Open Research Network - New Zealand Open Research Network (NZORN) is a collection of researchers and research-associated workers in New Zealand.

+

Australia & New Zealand Open Research Network - ANZORN is a network of local networks distributed throughout Australia and New Zealand.

+
+
+

Self Assessment

+
+True or False: All research papers published in the top journals, like Science and Nature, are always Open Access? +
+Answer +

False

+

Major Research journals like Science and Nature have an "Open Access" option when a manuscript is accepted, but they charge an extra fee to the authors to make those papers Open Access.

+

These high page costs are exclusionary to the majority of global scientists who cannot afford to front these costs out of pocket.

+

This will soon change, at least in the United States. The Executive Branch of the federal government recently mandated that future federally funded research be made Open Access after 2026.

+
+
+
+True or False: an article states all of the research data used in the experiments "are available upon request from the corresponding author(s)," meaning the data are "Open" +
+Answer +

False

+

In order for research to be open, the data need to be freely available from a digital repository, like Data Dryad, Zenodo.org, or CyVerse.

+

Data that are 'available upon request' do not meet the FAIR data principles.

+
+
+
+True or False: Online Universities and Data Science Boot Camps like UArizona Online, Coursera, Udemy, etc. promote digital literacy and are Open Educational Resources? +
+Answer +

False

+

These examples are for-profit programs which teach data science and computer programming online. Some may be official through public or private universities and offer credits toward a degree or a certificate. Some of these programs are known to be predatory.

+

The organizations we have listed above are Open Educational Resources - they are free and available to anyone who wants to work with them asynchronously, virtually, or in person.

+
+
+
+Using a version control system to host the analysis code and computational notebooks, and including these in your Methods section or Supplementary Materials, is an example of an Open Methodology? +
+Answer +

Yes!

+

Using a VCS like GitHub or GitLab is a great step towards making your research more reproducible.

+

Ways to improve your open methodology can include documentation of your physical bench work, and even video recordings and step-by-step guides for every part of your project.

+
+
+
+You are asked to review a paper for an important journal in your field. The editor asks if you're willing to release your identity to the authors, thereby "signing" your review. Is this an example of "Open Peer Review"? +
+Answer +

No

+

Just because you've given your name to the author(s) of the manuscript, this does not make your review open.

+

If the journal later publishes your review alongside the final manuscript, then you will have participated in an Open Review.

+
+
+
+You read a paper where the author(s) wrote their own code and licensed it as "Open Source" software for a specific set of scientific tasks which you want to replicate. When you visit their personal website, you find the GitHub repository does not exist (because it is now private). You contact the authors asking for access, but they refuse to share it "due to competing researchers who are seeking to steal their intellectual property". Is the software open source? +
+Answer +

No

+

Just because an author states they have given their software a permissive license does not make the software open source.

+

Always make certain there is a LICENSE associated with any software you find on the internet.

+

In order for the software to be open, it must follow the Open Source Initiative definition

+
+
+ +
+
+ + + Last update: + 2023-10-18 + + +
+ + + + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/02_project_management/02_project_management.md b/02_project_management/02_project_management.md new file mode 100644 index 000000000..8ab6efc3e --- /dev/null +++ b/02_project_management/02_project_management.md @@ -0,0 +1,447 @@ +# Introduction to Project Management + +!!! Success "Learning Objectives" + + After this lesson, you should be able to: + + * Discuss different levels of project management + * Describe tools and approaches to managing collaborative projects + * Describe best practices for computational project organization + * Understand benefits of establishing project management practices from the start of a project until after it ends + +"**Project Management**" by itself may sound a bit vague and broad. + +??? Quote "Definition" + + "Project management is the use of specific knowledge, skills, tools and techniques to deliver something of value to people. The development of software for an improved business process, the construction of a building, the relief effort after a natural disaster, the expansion of sales into a new geographic market—these are all examples of projects." - [Project Management Institute](https://www.pmi.org/about/learn-about-pmi/what-is-project-management){target=_blank} + + [:material-wikipedia: Wikipedia definition](https://en.wikipedia.org/wiki/Project_management){target=_blank} + +Here we use the term in two different contexts. + +1. First, we'll go over the project management of scientific labs, groups, and projects, talking about things like governance, how to develop operations manuals, laying out roles and responsibilities, planning steps and the workflows which connect them. + +2. Next, we'll go over project management as "[research objects](https://en.wikipedia.org/wiki/Research_Object){target=_blank}": making sure your data, code, and documents are well-organized. These are crucial for future topics like version control and reproducibility. + +## 1. 
Classic Project Management + +This type of overall project management may be required for some grants, and while it may be tempting to put in the minimal effort on one of the many pieces of paperwork you're required to complete, this type of overall project planning can be very useful. + +??? Info "Traditional Organizations" + + Major research (R1) universities are organized around hierarchical frameworks, often described using an Organizational Chart. + + [![UMaine Organization Chart](https://www.maine.edu/unified-accreditation/wp-content/uploads/sites/158/2022/06/UMaine-org-chart-05-18-22.png){ width="600" }](https://www.maine.edu/unified-accreditation/self-study-draft/university-of-maine-organizational-chart/){target=_blank} + + Scientific Research Projects are generally organized around a "[Principal Investigator](https://seedfund.nsf.gov/fastlane/definitions/){target=_blank}" with "Co-Principal Investigators" and "Senior Personnel" in supporting roles. Postdoctoral Researchers and gradaute students are often employed by research projects as part of their continued education and "professional preparation". + + [![AIIRA Management Team](https://aiira.iastate.edu/images/aiira-team.png){ width="600" }](https://aiira.iastate.edu/about-us/faculty-team/){target=_blank} + + Given the nebulous breakdown of authority within lab groups and small research projects, the organization and governance of teams can be difficult to determine from the outside perspective. Indeed, internally team members on projects often do not know who is in charge or who reports to whom. + +The Turing Way offer a [lesson on Project Design](https://the-turing-way.netlify.app/project-design/project-design.html){target=_blank} related to effective project planning and management. + +### Project Governance + +??? Quote "Definitions" + + Project Governance is the set of rules, procedures and policies that determine how projects are managed and overseen. 
+ + "The set of policies, regulations, functions, processes, and procedures and responsibilities that define the establishment, management and control of projects, programmes or portfolios." - [APM (2012)](https://www.academia.edu/44132624/APM_BODY_OF_KNOWLEDGE){target=_blank}, [open.edu](https://www.open.edu/openlearn/mod/oucontent/view.php?id=27294&printable=1){target=_blank} + + [:material-wikipedia: Wikipedia Definition](https://en.wikipedia.org/wiki/Project_governance){target=_blank} + +No matter how small, i.e., even single person-run projects, a good Project Governance structure can help keep work on track and headed toward a timely finish. + +Establishing a project governance document at the onset of a project is a good way of setting boundaries, roles and responsibilities, pre-registration about what deliverables are expected, and what the consequences will be for breaking trust. + +??? Tip "Example Governance Documents" + + [Munoz-Torres et al. 2020](https://zenodo.org/record/3839120#.YyUJA-zMLzd){target=_blank} + +### Research Collaborations + +[Sahneh & Balk et al. (2020)](https://doi.org/10.1371/journal.pcbi.1008879){target=_blank} Ten simple rules to cultivate transdisciplinary collaboration in data science, discuss the interactions amongst teams of diverse researchers. + +
+ ![10 simple rules](https://journals.plos.org/ploscompbiol/article/figure/image?size=large&id=10.1371/journal.pcbi.1008879.g001){ width="600" } +
[Sahneh & Balk et al. (2020)](https://doi.org/10.1371/journal.pcbi.1008879){target=_blank} Fig 1. How the rules work together and intersect. There are multiple components in collaborations: person–person interactions, person–technology interactions, person–data interactions, and data–technology interactions. Synergy between these components results in a successful collaboration.
+
+ + +### Team Roles and Responsibilities + +It can be easy for certain tasks to slip through the cracks. Established roles and responsibilities of teams can help ensure nobody gets saddled with too much work, and reduces chances of disputes among collaborators. + +??? Tip "Project Management Professional (PMP)®" + + A [Project Management Professional (PMP)®](https://www.pmi.org/certifications/project-management-pmp){target=_blank} certification has been embraced globally as adding value to your professional resume. + + Academia has also embraced PMP certification as part of continuing education for academic staff and faculty. + + [University of Arizona PMP prep](https://ce.arizona.edu/classes/fundamentals-project-management-pmp-exam-prep){target=_blank} + + +??? Example "Team roles and titles" + + Again, The Turing Way provide an excellent set of examples of [infrastructure job titles and roles](https://the-turing-way.netlify.app/collaboration/research-infrastructure-roles.html){target=_blank} on software driven projects: + + **Community Manager** - "responsibilities include establishing engagement, organising community spaces and events, supporting people through inclusive practices, developing and maintaining resources, growing and evaluating use cases and collaborating with people involved in research and scientific communities." ([1](https://the-turing-way.netlify.app/collaboration/research-infrastructure-roles/community-manager.html#community-managers-overview){target=_blank}, [2](https://www.turing.ac.uk/research/research-programmes/tools-practices-and-systems/community-management-and-open-research){target=_blank}) + +
+ ![community manager](https://www.turing.ac.uk/sites/default/files/inline-images/tps%20graphic_0.jpg){ width="400" } +
This image was created by [Scriberia](https://www.scriberia.com/){target=_blank} for The Turing Way community and is used under a CC-BY 4.0 licence.
+
+ + **[Data Science Educator](https://datascienceineducation.com/c03.html){target=_blank}** - "... data science in education refers to the application of data science methods, while other times it refers to data science as a context for teaching and learning" [Rosenberg et al. (2020)](https://www.taylorfrancis.com/chapters/edit/10.4324/9780429260919-7/making-data-science-count-education-joshua-rosenberg-michael-lawson-daniel-anderson-ryan-seth-jones-teomara-rutherford){target=_blank}, [Estrellado et al.](https://datascienceineducation.com/c03.html){target=_blank} + + **[Data Scientist](https://en.wikipedia.org/wiki/Data_science){target=_blank}** - a professional who uses analytical, statistical, and programming skills to collect, analyze, and describe data. + + **[Data Steward](https://the-turing-way.netlify.app/collaboration/research-infrastructure-roles/data-steward.html?highlight=data%20steward){target=_blank}** - "... responsible for ensuring the quality and fitness for purpose of the organization's data assets, including the metadata for those data assets." - [Wikipedia]() + + **[Developer Advocate](){target=_blank}** - sometimes called [platform evangelism](https://en.wikipedia.org/wiki/Platform_evangelism), advocates represent the voice of the user (in the case of open science, the scientists) internally to the project team or company, and the voice of the project or company externally to the public. 
+ + **[DevOps Engineer](https://en.wikipedia.org/wiki/DevOps){target=_blank}** - a combinination of software development "Dev" and IT operations "Ops", responsibilities focus on "[continuous delivery](https://en.wikipedia.org/wiki/Continuous_delivery){target=_blank}" and [agile software development](https://en.wikipedia.org/wiki/Agile_software_development){target=_blank} + + **[Research Application Manager (RAM)](https://the-turing-way.netlify.app/collaboration/research-infrastructure-roles/ram.html){target=_blank}** - in some ways a combination of Community Manager and Developer Advocate, + +
+ ![ram](https://the-turing-way.netlify.app/_images/research-application-managers.jpg){ width="600" } +
Fig. 94 Research Application Managers work with the research team to embed outputs into user organisations. The Turing Way Community, & Scriberia. (2020, November). Illustrations from the Turing Way book dashes. Zenodo. http://doi.org/10.5281/zenodo.4323154
+
+ + **[Research Software Engineer](https://us-rse.org/about/what-is-an-rse/){target=_blank}** - those who regularly use expertise in programming to advance research - [US Research Software Engineer (US-RSE) Association](https://us-rse.org/){target=_blank} + +#### Open Source Research Software Maintainer + +Becoming an open source software maintainer is not to be taken lightly. + +
+ ![xkcd](https://imgs.xkcd.com/comics/dependency.png){ width="400" } +
Image Credit: [XKCD Dependency](https://m.xkcd.com/2347/){target=_blank}
+
+ +When you create a new software, library, or package, you are becoming its parent and guardian. + +### Development Methodology + +
+![agile](https://upload.wikimedia.org/wikipedia/commons/d/da/Waterfallvsagile.jpg){ width="400" } +
the "leaps of faith" required in Agile vs Waterfall. Image Credit: Wikimedia Commons CC BY 4.0
+
+ +In software development, there are two common methologies which have similar applications to a research project: + +- [:material-wikipedia: Agile](https://en.wikipedia.org/wiki/Agile_software_development) + - [Scrum](https://www.atlassian.com/agile/scrum) + - [Kanban](https://www.atlassian.com/agile/kanban) +- [:material-wikipedia: Waterfall](https://en.wikipedia.org/wiki/Waterfall_model) + +
+![agile](https://upload.wikimedia.org/wikipedia/commons/4/49/Waterfall_vs_agile-1.png){ width="800" } +
the effort distribution of Agile vs Waterfall. Image Credit: Wikimedia Commons CC BY 4.0
+
+ +!!! info "Comparisons between methodologies" + - [LucidChart Blog: Agile vs Waterfall vs Kanban vs Scrum ](https://www.lucidchart.com/blog/agile-vs-waterfall-vs-kanban-vs-scrum) + + - [Ontology of Value: Agile vs Waterfall vs Kanban vs Scrum](https://ontologyofvalue.com/project-management-key-concepts-agile-kanban-scrum-waterfall/) + + +### Breakout Discussion + +Now we will do a breakout discussion section to talk about overall project management. + +!!! Question "What is an example of a poorly managed project you were involved in? What contributed to this feeling?" + +!!! Question "Why do you think effective project management is important to Open Science?" + +!!! Question "What are some limitations to you, your lab/group, or your domain?" + +--- + +## 2. Research Objects + +??? Quote "Definition" + + "A workflow-centric research object bundles a workflow, the provenance of the results obtained by its enactment, other digital objects that are relevant for the experiment (papers, datasets, etc.), and annotations that semantically describe all these objects." - [Corcho et al. 2012](https://oa.upm.es/20401/){target=_blank} + + "... semantically rich aggregations of resources, that can possess some scientific intent or support some research objective." - [Bechhofer et al. 2010](https://doi.org/10.1038/npre.2010.4626.1){target=_blank} + + [:material-wikipedia: Wikipedia definition](https://en.wikipedia.org/wiki/Research_Object){target=_blank} + +When we talk about project management in this section, we mean the way you organize data, code, images, documents, and documentation within a project. One way to think about this is in the context of "[research objects](https://en.wikipedia.org/wiki/Research_Object){target=_blank}" which condense into a single end point (think: a URL like a digital object identifier (DOI)) where others can come to reproduce your research. + +??? 
Example "Examples of Research Objects" + + [Boettiger 2018](https://github.com/cboettig/noise-phenomena){target=blank} + + [Gillan et al. 2021](https://datacommons.cyverse.org/browse/iplant/home/shared/commons_repo/curated/Gillan_Ecosphere_2021){target=blank} + +
+ ![ro](./assets/RO_crate.png){ width="800" } +
Research Objects from ResearchObject.org
+
+ + +??? List "Research Object Services" + + [ResearchObject](https://www.researchobject.org/){target=_blank} + + [ROHub](https://reliance.rohub.org/){target=_blank} - [Garcia-Silva et al. 2019](https://doi.org/10.1016/j.future.2019.03.046){target=_blank} + +### Research Object Organization + +If you've ever had to navigate someone else's computer or a GitHub repository, you probably know that a poorly organized project can greatly reduce its accessibility. On the other hand, a well-organized project can: + +- make your work more accessible to others +- help collaborators effectively contribute to your project +- ease the growing pains of a rapidly scaling project +- make life much easier for your future self + +It can be easy to overlook sound project management, opting for a "just get it done ASAP" approach to your work, but this almost always costs you more time in the end. The best time to introduce good project management is at the start of a project, and the second best time is right now. + +
+ ![ro](assets/organizing_searching_tradeoff.png){ width="600" } +
An hour spent reorganizing a project today may save you days of headaches later on.
+
+ + +#### Organization Examples + +- Example data project organization from [UArizona Libraries](https://data.library.arizona.edu/data-management/best-practices/data-project-organization) +- [CookieCutter Templates](https://github.com/topics/cookiecutter) + +Example project structure: + +``` +. +├── AUTHORS.md +├── LICENSE +├── README.md +├── bin <- Your compiled model code can be stored here (not tracked by git) +├── config <- Configuration files, e.g., for doxygen or for your model if needed +├── data +│ ├── external <- Data from third party sources. +│ ├── interim <- Intermediate data that has been transformed. +│ ├── processed <- The final, canonical data sets for modeling. +│ └── raw <- The original, immutable data dump. +├── docs <- Documentation, e.g., doxygen or scientific papers (not tracked by git) +├── notebooks <- Ipython or R notebooks +├── reports <- For a manuscript source, e.g., LaTeX, Markdown, etc., or any project reports +│   └── figures <- Figures for the manuscript or reports +└── src <- Source code for this project + ├── data <- scripts and programs to process data + ├── external <- Any external source code, e.g., pull other git projects, or external libraries + ├── models <- Source code for your own model + ├── tools <- Any helper scripts go here + └── visualization <- Scripts for visualisation of your results, e.g., matplotlib, ggplot2 related. + +``` + +??? Tip "Best Practices" + + 1. Projects should be self-contained + - this is probably the most important concept + - strictly necessary for version control + - use relative paths + + 2. Use structure to organize files + + 3. Don't underestimate complexity + + 4. Keep raw data raw + + 5. Treat generated output as disposable + + 6. Avoid manual (point-and-click) steps as much as possible + - if necessary, record in detail + - should also be recorded in prior and subsequent steps + + 7. 
Avoid spaces in file and folder names + - consider `snake_case` `camelCase` `PascalCase` `kebab-case` instead + + 8. Describe structure in README + + 9. The best time to organize is at the start, the 2nd best is right now + + 10. Reorganize if necessary, but don't overdo it + + 11. Using same basic structure can help you navigate new/old projects + + +??? Info "Automate the creation a working directory" + + You might find a nice basic structure that works as a good starting place for many of your projects, or smaller components of big projects. + + Instead of having to repeat the process of making that directory structure, which could be tedious and introduce mistakes, you could write some code to do it for you. + + The following is a `bash` script that takes one argument, the name of the new project (with no spaces), and creates that project with a premade directory structure for you to put files into. + + ``` {bash} + + #!/usr/bin/env bash + + # Run this script with the name of the new project as + # an argument, like so: `bash make_project.sh my_project` + # It will generate a project with the following structure: + + #. + #|-- README.md + #|-- data + #| |-- cleaned + #| `-- raw + #|-- images + #|-- reports + #`-- scripts + + mkdir "$1" + + cd "$1" || exit + + echo "# $1" >> README.md + + mkdir data + + mkdir data/raw + + mkdir data/cleaned + + mkdir scripts + + mkdir images + + mkdir reports + ``` + + This approach to automating repetitive tasks is something we'll dig into even deeper in later lessons. + +??? Tip "Productivity Software" + + [:material-shield-key: CryptPad](https://cryptpad.fr/){target=_blank} - online rich text pad. + + [:material-pencil: Draw.io](https://about.draw.io/){target=_blank} - drawings and diagrams in browser. + + [:material-file-excel: Excel](https://www.microsoft.com/en-us/microsoft-365/excel){target=_blank} - love it or hate it, many people still work in it or with `.xlsx` format files. 
+ + [:material-google-drive: Google Docs](https://www.google.com/docs/about/){target=_blank} - is an online word processor included as part of the free, web-based Google Docs Editors suite offered by Google. + + [:octicons-markdown-16: HackMD](https://hackmd.io){target=_blank} - online markdown editor. + + [:octicons-markdown-16: JupyterBook](https://jupyterbook.org/en/stable/intro.html){target=_blank} - create documentation using Jupyter Notebooks and Markdown + + [:octicons-markdown-16: MkDocs](https://www.mkdocs.org/){target=_blank} - is a fast, simple and downright gorgeous static site generator that's geared towards building project documentation. + + [:material-feather: LaTeX](https://www.latex-project.org/){target=_blank} - is a high-quality typesetting system + + [:material-leaf: Overleaf](https://www.overleaf.com/){target=_blank} - LaTeX online document sharing platform. + + [:octicons-markdown-16: ReadTheDocs](https://readthedocs.com/){target=_blank} - documentation using a variety of Markup langages + + [Software Heritage](https://www.softwareheritage.org/){target=_blank} - preserves software source code for present and future generations. + +??? 
Tip ":octicons-project-24: Project Management Software" + + [OSF.io]() + + - Examples + + [:material-atlassian: Atlassian](https://www.atlassian.com/){target=_blank} + + - [:fontawesome-brands-confluence: Confluence](https://www.atlassian.com/software/confluence){target=_blank} + + - [:material-jira: Jira](https://www.atlassian.com/software/jira){target=_blank} + + - [:material-trello: Trello](https://trello.com/en){target=_blank} + + [:material-github: GitHub Issues](https://github.com/features/issues){target=_blank} + + [Open Project](https://www.openproject.org/){target=_blank} + + [:octicons-git-merge-24: ZenHub](https://www.zenhub.com/){target=_blank} + + [Basecamp](https://basecamp.com/){target=_blank} + + + + +### Breakout Discussion + +Now we will do a breakout discussion section to talk about research objects + +!!! Question "Who here has created a research object or attempted to?" + +!!! Question "Do you think someone could reproduce your research by accessing your research object?" + +!!! Question "Where might a research object not work for your research?" + +!!! Question "What would a research object look like for your research?" + +--- + +### Other Resources + +There are many other resources on more specific elements of project +management. We'll link to some of them here. + +- Using R Projects with RStudio: https://support.rstudio.com/hc/en-us/articles/200526207-Using-Projects +- Using the R package `here`: and +- An even more compartmentalized approach to project management: + +## Self Assessment + +??? Question "Why is Project Management used in research?" + + 1. Reduces [wasted] effort + + 2. Tracks progress or identifies more quickly when there is a lack of progress + + 3. Establishes a formal structure for teams + +??? Question "What are established roles and responsibilities of collaborative teams?" + + !!! Info "Example 1: Traditional University Research Teams" + + i. Principal Investigator, Co-Principal Investigators + + ii. 
Senior Personnel, Postdoctoral Researchers, Bench Scientists + + iii. (Graduate) Students + + !!! Info "Example 2: Research Infrastructure Teams" + + [Research infrastructure job titles and roles (Turing Way)](https://the-turing-way.netlify.app/collaboration/research-infrastructure-roles.html){target=_blank} + + i. Community Managers + + ii. Data Science Educators + + iii. Data Scientists + + iv. Developer Advocates + + iv. Research Software Engineers + + +??? Question "What are some uses of a Project Governance Document?" + + ??? Success "Answers" + + * Sets expectations for behavior and operations + + * Establishes roles and responsibilities of PI, staff, and senior personnel + + * Uses Pre-registration techniques about what deliverables are expects, and by when + + * Establishes what consequences will be for breaking trust + + +??? Question "Research Objects must include all components of research: governance document, manuals, documentation, research papers, analysis code, data, software containers" + + ??? Success "Answers" + + While a Research Object (RO) *may* include the entire kitchen sink from a research project, it does NOT always contain all of these things. + + Fundamentally, a RO should contain enough information and detail to reproduce a scientific study from its linked or self-contained parts. + + Components like large datasets may not be a part of the RO, but the code or analysis scripts should have the ability to connect to or stream those data. diff --git a/02_project_management/index.html b/02_project_management/index.html new file mode 100644 index 000000000..db1d76aea --- /dev/null +++ b/02_project_management/index.html @@ -0,0 +1,1644 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 3. Project Management - CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Introduction to Project Management

+
+

Learning Objectives

+

After this lesson, you should be able to:

+
    +
  • Discuss different levels of project management
  • +
  • Describe tools and approaches to managing collaborative projects
  • +
  • Describe best practices for computational project organization
  • +
  • Understand benefits of establishing project management practices from the start of a project until after it ends
  • +
+
+

"Project Management" by itself may sound a bit vague and broad.

+
+Definition +

"Project management is the use of specific knowledge, skills, tools and techniques to deliver something of value to people. The development of software for an improved business process, the construction of a building, the relief effort after a natural disaster, the expansion of sales into a new geographic market—these are all examples of projects." - Project Management Institute

+

Wikipedia definition

+
+

Here we use the term in two different contexts.

+
    +
  1. +

    First, we'll go over the project management of scientific labs, groups, and projects, talking about things like governance, how to develop operations manuals, laying out roles and responsibilities, planning steps and the workflows which connect them.

    +
  2. +
  3. +

    Next, we'll go over project management as "research objects": making sure your data, code, and documents are well-organized. These are crucial for future topics like version control and reproducibility.

    +
  4. +
+

1. Classic Project Management

+

This type of overall project management may be required for some grants, and while it may be tempting to put in the minimal effort on one of the many pieces of paperwork you're required to complete, this type of overall project planning can be very useful.

+
+Traditional Organizations +

Major research (R1) universities are organized around hierarchical frameworks, often described using an Organizational Chart.

+

UMaine Organization Chart

+


Scientific Research Projects are generally organized around a "Principal Investigator" with "Co-Principal Investigators" and "Senior Personnel" in supporting roles. Postdoctoral Researchers and graduate students are often employed by research projects as part of their continued education and "professional preparation".

+

AIIRA Management Team

+

Given the nebulous breakdown of authority within lab groups and small research projects, the organization and governance of teams can be difficult to determine from the outside perspective. Indeed, internally team members on projects often do not know who is in charge or who reports to whom.

+
+

The Turing Way offer a lesson on Project Design related to effective project planning and management.

+

Project Governance

+
+Definitions +

Project Governance is the set of rules, procedures and policies that determine how projects are managed and overseen.

+

"The set of policies, regulations, functions, processes, and procedures and responsibilities that define the establishment, management and control of projects, programmes or portfolios." - APM (2012), open.edu

+

Wikipedia Definition

+
+

No matter how small, i.e., even single person-run projects, a good Project Governance structure can help keep work on track and headed toward a timely finish.

+

Establishing a project governance document at the onset of a project is a good way of setting boundaries, roles and responsibilities, pre-registration about what deliverables are expected, and what the consequences will be for breaking trust.

+
+Example Governance Documents +

Munoz-Torres et al. 2020

+
+

Research Collaborations

+

Sahneh & Balk et al. (2020) Ten simple rules to cultivate transdisciplinary collaboration in data science, discuss the interactions amongst teams of diverse researchers.

+
+

10 simple rules +

Sahneh & Balk et al. (2020) Fig 1. How the rules work together and intersect. There are multiple components in collaborations: person–person interactions, person–technology interactions, person–data interactions, and data–technology interactions. Synergy between these components results in a successful collaboration.

+
+

Team Roles and Responsibilities

+

It can be easy for certain tasks to slip through the cracks. Established roles and responsibilities of teams can help ensure nobody gets saddled with too much work, and reduces chances of disputes among collaborators.

+
+Project Management Professional (PMP)® +

A Project Management Professional (PMP)® certification has been embraced globally as adding value to your professional resume.

+

Academia has also embraced PMP certification as part of continuing education for academic staff and faculty.

+

University of Arizona PMP prep

+
+
+Team roles and titles +

Again, The Turing Way provide an excellent set of examples of infrastructure job titles and roles on software driven projects:

+

Community Manager - "responsibilities include establishing engagement, organising community spaces and events, supporting people through inclusive practices, developing and maintaining resources, growing and evaluating use cases and collaborating with people involved in research and scientific communities." (1, 2)

+

+ community manager +
This image was created by Scriberia for The Turing Way community and is used under a CC-BY 4.0 licence.
+

+

Data Science Educator - "... data science in education refers to the application of data science methods, while other times it refers to data science as a context for teaching and learning" Rosenberg et al. (2020), Estrellado et al.

+

Data Scientist - a professional who uses analytical, statistical, and programming skills to collect, analyze, and describe data.

+

Data Steward - "... responsible for ensuring the quality and fitness for purpose of the organization's data assets, including the metadata for those data assets." - Wikipedia

+

Developer Advocate - sometimes called platform evangelism, advocates represent the voice of the user (in the case of open science, the scientists) internally to the project team or company, and the voice of the project or company externally to the public.

+


DevOps Engineer - a combination of software development "Dev" and IT operations "Ops", responsibilities focus on "continuous delivery" and agile software development

+


Research Application Manager (RAM) - in some ways a combination of Community Manager and Developer Advocate.

+

+ ram +
Fig. 94 Research Application Managers work with the research team to embed outputs into user organisations. The Turing Way Community, & Scriberia. (2020, November). Illustrations from the Turing Way book dashes. Zenodo. http://doi.org/10.5281/zenodo.4323154
+

+

Research Software Engineer - those who regularly use expertise in programming to advance research - US Research Software Engineer (US-RSE) Association

+
+

Open Source Research Software Maintainer

+

Becoming an open source software maintainer is not to be taken lightly.

+
+

xkcd +

Image Credit: XKCD Dependency

+
+

When you create a new software, library, or package, you are becoming its parent and guardian.

+

Development Methodology

+
+

agile

+
the "leaps of faith" required in Agile vs Waterfall. Image Credit: Wikimedia Commons CC BY 4.0
+
+


In software development, there are two common methodologies which have similar applications to a research project:

+ +
+

agile

+

the effort distribution of Agile vs Waterfall. Image Credit: Wikimedia Commons CC BY 4.0
+
+
+

Comparisons between methodologies

+
- [LucidChart Blog: Agile vs Waterfall vs Kanban vs Scrum ](https://www.lucidchart.com/blog/agile-vs-waterfall-vs-kanban-vs-scrum)
+
+- [Ontology of Value: Agile vs Waterfall vs Kanban vs Scrum](https://ontologyofvalue.com/project-management-key-concepts-agile-kanban-scrum-waterfall/)
+
+
+

Breakout Discussion

+

Now we will do a breakout discussion section to talk about overall project management.

+
+

What is an example of a poorly managed project you were involved in? What contributed to this feeling?

+
+
+

Why do you think effective project management is important to Open Science?

+
+
+

What are some limitations to you, your lab/group, or your domain?

+
+
+

2. Research Objects

+
+Definition +

"A workflow-centric research object bundles a workflow, the provenance of the results obtained by its enactment, other digital objects that are relevant for the experiment (papers, datasets, etc.), and annotations that semantically describe all these objects." - Corcho et al. 2012

+

"... semantically rich aggregations of resources, that can possess some scientific intent or support some research objective." - Bechhofer et al. 2010

+

Wikipedia definition

+
+

When we talk about project management in this section, we mean the way you organize data, code, images, documents, and documentation within a project. One way to think about this is in the context of "research objects" which condense into a single end point (think: a URL like a digital object identifier (DOI)) where others can come to reproduce your research.

+
+Examples of Research Objects +

Boettiger 2018

+

Gillan et al. 2021

+
+
+

ro +

Research Objects from ResearchObject.org

+
+
+Research Object Services +

ResearchObject

+

ROHub - Garcia-Silva et al. 2019

+
+

Research Object Organization

+

If you've ever had to navigate someone else's computer or a GitHub repository, you probably know that a poorly organized project can greatly reduce its accessibility. On the other hand, a well-organized project can:

+
    +
  • make your work more accessible to others
  • +
  • help collaborators effectively contribute to your project
  • +
  • ease the growing pains of a rapidly scaling project
  • +
  • make life much easier for your future self
  • +
+

It can be easy to overlook sound project management, opting for a "just get it done ASAP" approach to your work, but this almost always costs you more time in the end. The best time to introduce good project management is at the start of a project, and the second best time is right now.

+
+

ro +

An hour spent reorganizing a project today may save you days of headaches later on.

+
+

Organization Examples

+ +

Example project structure:

+
.
+├── AUTHORS.md
+├── LICENSE
+├── README.md
+├── bin                <- Your compiled model code can be stored here (not tracked by git)
+├── config             <- Configuration files, e.g., for doxygen or for your model if needed
+├── data
+│   ├── external       <- Data from third party sources.
+│   ├── interim        <- Intermediate data that has been transformed.
+│   ├── processed      <- The final, canonical data sets for modeling.
+│   └── raw            <- The original, immutable data dump.
+├── docs               <- Documentation, e.g., doxygen or scientific papers (not tracked by git)
+├── notebooks          <- Ipython or R notebooks
+├── reports            <- For a manuscript source, e.g., LaTeX, Markdown, etc., or any project reports
+│   └── figures        <- Figures for the manuscript or reports
+└── src                <- Source code for this project
+    ├── data           <- scripts and programs to process data
+    ├── external       <- Any external source code, e.g., pull other git projects, or external libraries
+    ├── models         <- Source code for your own model
+    ├── tools          <- Any helper scripts go here
+    └── visualization  <- Scripts for visualisation of your results, e.g., matplotlib, ggplot2 related.
+
+
+Best Practices +
    +
  1. +

    Projects should be self-contained

    +
      +
    • this is probably the most important concept
    • +
    • strictly necessary for version control
    • +
    • use relative paths
    • +
    +
  2. +
  3. +

    Use structure to organize files

    +
  4. +
  5. +

    Don't underestimate complexity

    +
  6. +
  7. +

    Keep raw data raw

    +
  8. +
  9. +

    Treat generated output as disposable

    +
  10. +
  11. +

    Avoid manual (point-and-click) steps as much as possible

    +
      +
    • if necessary, record in detail
    • +
    • should also be recorded in prior and subsequent steps
    • +
    +
  12. +
  13. +

    Avoid spaces in file and folder names

    +
      +
    • consider snake_case camelCase PascalCase kebab-case instead
    • +
    +
  14. +
  15. +

    Describe structure in README

    +
  16. +
  17. +

    The best time to organize is at the start, the 2nd best is right now

    +
  18. +
  19. +

    Reorganize if necessary, but don't overdo it

    +
  20. +
  21. +

    Using same basic structure can help you navigate new/old projects

    +
  22. +
+
+
+Automate the creation of a working directory +

You might find a nice basic structure that works as a good starting place for many of your projects, or smaller components of big projects.

+

Instead of having to repeat the process of making that directory structure, which could be tedious and introduce mistakes, you could write some code to do it for you.

+

The following is a bash script that takes one argument, the name of the new project (with no spaces), and creates that project with a premade directory structure for you to put files into.

+
#!/usr/bin/env bash
+
+# Run this script with the name of the new project as 
+# an argument, like so: `bash make_project.sh my_project`
+# It will generate a project with the following structure:
+
+#.
+#|-- README.md
+#|-- data
+#|   |-- cleaned
+#|   `-- raw
+#|-- images
+#|-- reports
+#`-- scripts
+
+mkdir "$1"
+
+cd "$1" || exit
+
+echo "# $1" >> README.md
+
+mkdir data
+
+mkdir data/raw
+
+mkdir data/cleaned
+
+mkdir scripts
+
+mkdir images
+
+mkdir reports
+
+

This approach to automating repetitive tasks is something we'll dig into even deeper in later lessons.

+
+
+Productivity Software +

CryptPad - online rich text pad.

+

Draw.io - drawings and diagrams in browser.

+

Excel - love it or hate it, many people still work in it or with .xlsx format files.

+

Google Docs - is an online word processor included as part of the free, web-based Google Docs Editors suite offered by Google.

+

HackMD - online markdown editor.

+

JupyterBook - create documentation using Jupyter Notebooks and Markdown

+

MkDocs - is a fast, simple and downright gorgeous static site generator that's geared towards building project documentation.

+

LaTeX - is a high-quality typesetting system

+

Overleaf - LaTeX online document sharing platform.

+


ReadTheDocs - documentation using a variety of markup languages

+

Software Heritage - preserves software source code for present and future generations.

+
+
+ Project Management Software +

OSF.io

+
    +
  • Examples
  • +
+

Atlassian

+ +

GitHub Issues

+

Open Project

+

ZenHub

+

Basecamp

+
+

Breakout Discussion

+

Now we will do a breakout discussion section to talk about research objects

+
+

Who here has created a research object or attempted to?

+
+
+

Do you think someone could reproduce your research by accessing your research object?

+
+
+

Where might a research object not work for your research?

+
+
+

What would a research object look like for your research?

+
+
+

Other Resources

+

There are many other resources on more specific elements of project +management. We'll link to some of them here.

+ +

Self Assessment

+
+Why is Project Management used in research? +
    +
  1. +

    Reduces [wasted] effort

    +
  2. +
  3. +

    Tracks progress or identifies more quickly when there is a lack of progress

    +
  4. +
  5. +

    Establishes a formal structure for teams

    +
  6. +
+
+
+What are established roles and responsibilities of collaborative teams? +
+

Example 1: Traditional University Research Teams

+

i. Principal Investigator, Co-Principal Investigators

+

ii. Senior Personnel, Postdoctoral Researchers, Bench Scientists

+

iii. (Graduate) Students

+
+
+

Example 2: Research Infrastructure Teams

+

Research infrastructure job titles and roles (Turing Way)

+

i. Community Managers

+

ii. Data Science Educators

+

iii. Data Scientists

+

iv. Developer Advocates

+


v. Research Software Engineers

+
+
+
+What are some uses of a Project Governance Document? +
+Answers +
    +
  • +

    Sets expectations for behavior and operations

    +
  • +
  • +

    Establishes roles and responsibilities of PI, staff, and senior personnel

    +
  • +
  • +


    Uses Pre-registration techniques about what deliverables are expected, and by when

    +
  • +
  • +

    Establishes what consequences will be for breaking trust

    +
  • +
+
+
+
+Research Objects must include all components of research: governance document, manuals, documentation, research papers, analysis code, data, software containers +
+Answers +

While a Research Object (RO) may include the entire kitchen sink from a research project, it does NOT always contain all of these things.

+

Fundamentally, a RO should contain enough information and detail to reproduce a scientific study from its linked or self-contained parts.

+

Components like large datasets may not be a part of the RO, but the code or analysis scripts should have the ability to connect to or stream those data.

+
+
+ +
+
+ + + Last update: + 2023-10-18 + + +
+ + + + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/03_managing_data/03_managing_data.md b/03_managing_data/03_managing_data.md new file mode 100644 index 000000000..35d32e765 --- /dev/null +++ b/03_managing_data/03_managing_data.md @@ -0,0 +1,607 @@ +# Managing Data + +!!! Success "Learning Objectives" + + After this lesson, you should be able to: + + * Recognize data as the foundation of open science and be able to describe the "life cycle of data" + * Use self-assessments to evaluate your current data management practices + * Cite tools and resources to improve your data management practices + * Know the biggest challenge to effective data management + +### Why should you care about data management? + +Ensuring that data are effectively organized, shared, and preserved +is critical to making your science impactful, efficient, and open. + +??? Warning + + The biggest challenge to data management is **making it an afterthought**. + + Unfortunately, poor data management doesn't have a high upfront cost. + You can do substantial work before realizing you are in trouble. Like + a swimmer in rip current, by the time you realize you are in trouble, + you may already be close to drowning. + + The solution? Make data management the first thing you consider when + starting a research project. It also needs to be a policy you + institute right away for your research group. + +!!! Question "How would you answer?" + - If you give your data to a colleague who has not been involved with your project, would they be able to make sense of it? Would they be able to use it properly? + - If you come back to your own data in five years, will you be able to make sense of it? Will you be able to use it properly? + - When you are ready to publish a paper, is it easy to find all the correct versions of all the data you used and present them in a comprehensible manner? 
+ +Well-managed Data Sets: + +- Make life much easier for you and your collaborators +- Benefit the scientific research community by allowing others to reuse your data +- Are required by most funders and many journals +- Recent [Dear Colleague letter](https://www.nsf.gov/pubs/2019/nsf19069/nsf19069.jsp) from NSF +- NSF [proposal preparation guidelines](https://www.nsf.gov/pubs/policydocs/pappg19_1/pappg_11.jsp#XID4) + +--- + +### Data Self-assessment + +**Part I: Basic questions** + +Here are some questions about how you manage and work with data. + +!!! Question "Activity" + In small groups, discuss the following questions. You will be provided with a space for documenting our shared answers. + + 1\. What are the two or three data types that you most frequently work with? + - Think about the sources (observational, experimental, simulated, compiled/derived) + - Also consider the formats (tabular, sequence, database, image, etc.) + + 2\. What is the scale of your data? + + ??? Tip + We often talk about the scale of data using the ["Three V's"](https://www.bigdataframework.org/four-vs-of-big-data/): + + - Volume: Size of the data (MBs, GBs, TBs); can also include how many files (e.g dozens of big files, or millions of small ones) + - Velocity: How quickly are these data produced and analyzed? A lot coming in a single batch infrequently, or, a constant small amount of data that must be rapidly analyzed? + - Variety: How many different data types (raw files? databases?) + A fourth V (Veracity) captures the need to make decisions about data processing (i.e., separating low- and high-quality data) + + 3\. What is your strategy for storing and backing up your data? + + 4\. What is your strategy for verifying the integrity of your data? (i.e. verifying that your data has not be altered) + + 5\. What is your strategy for searching your data? + + 6\. What is your strategy for sharing (and getting credit for) your data? (i.e. 
How will do you share with your community/clients? How is that sharing documented? How do you evaluate the impact of data shared? ) + + +--- + +## Data Management Basics + +Let's learn a little more about data so that we can evaluate your +self-assessment responses. + +### Data Types + +Different types of data require different management practices. What are +some data types and sources you might use in your work? (Adapted from +DMP Tool [Data management general guidance](https://dmptool.org/general_guidance#types-of-data)) + +**Data Types** + +- Text: field or laboratory notes, survey responses +- Numeric: tables, counts, measurements +- Audiovisual: images, sound recordings, video +- Models, computer code +- Discipline-specific: FASTA in biology, FITS in astronomy, CIF in chemistry +- Instrument-specific: equipment outputs + +**Data Sources** + +*Observational* + +- Captured in real-time, typically outside the lab +- Usually irreplaceable and therefore the most important to safeguard +- Examples: Sensor readings, telemetry, survey results, images + +*Experimental* + +- Typically generated in the lab or under controlled conditions +- Often reproducible, but can be expensive or time-consuming +- Examples: gene sequences, chromatograms, magnetic field readings + +*Simulation* + +- Machine generated from test models +- Likely to be reproducible if the model and inputs are preserved +- Examples: climate models, economic models + +*Derived / Compiled* + +- Generated from existing datasets +- Reproducible, but can be very expensive and time-consuming +- Examples: text and data mining, compiled database, 3D models + +--- + +!!! Tip + **The Data Life Cycle** + + *Data management* is the set of practices that allow researchers to + effectively and efficiently handle data throughout the data life + cycle. Although typically shown as a circle (below) the actually + life cycle of any data item may follow a different path, with + branches and internal loops. 
Being aware of your data's future + helps you plan how to best manage them. + + ![lifecycle](assets/data_life_cycle.png) + + Image from [Strasser *et al*](https://www.dataone.org/sites/all/documents/DataONE_BP_Primer_020212.pdf). + + +### Best practices for the data life cycle + +The summary below is adapted from the excellent [DataONE best practices primer](https://old.dataone.org/sites/all/documents/DataONE_BP_Primer_020212.pdf). + +**Plan** + +- Describe the data that will be compiled, and how the data will be managed and made accessible throughout its lifetime +- A good plan considers each of the stages below + +??? Warning + + The biggest challenge to data management making it an afterthought. + + Unfortunately, poor data management doesn't have a high upfront cost. + You can do substantial work before realizing you are in trouble. Like + a swimmer in rip current, by the time you realize you are in trouble, + you may already be close to drowning. + + The solution? Make data management the first thing you consider when + starting a research project. It also needs to be a policy you + institute right away for your research group. + +**Collect** + +- Have a plan for data organization in place before collecting data +- Collect and store observation metadata at the same time you collect the metadata +- Take advantage of machine generated metadata + +**Assure** + +- Record any conditions during collection that might affect the quality of the data +- Distinguish estimated values from measured values +- Double check any data entered by hand +- Perform statistical and graphical summaries (e.g., max/min, average, range) to check for questionable or impossible values. +- Mark data quality, outliers, missing values, etc. + +**Describe:** + +- Comprehensive data documentation (i.e. metadata) is the key to + future understanding of data. 
Without a thorough description of + the context of the data, the context in which they were collected, + the measurements that were made, and the quality of the data, it + is unlikely that the data can be easily discovered, understood, or + effectively used. + +- Organize your data for publication. Before you can describe your + data, you must decide how to organize them. This should be planned + before hand, so that data organization is a minimal task at the + time of publication. + +- Thoroughly describe the dataset (e.g., name of dataset, list of + files, date(s) created or modified, related datasets) including + the people and organizations involved in data collection (e.g., + authors, affiliations, sponsor). Also include: + - An [ORCID](https://orcid.org/) (obtain one if you don't have one). + - The scientific context (reason for collecting the data, how they were collected, equipment and software used to generate the data, conditions during data collection, spatial and temporal resolution) + - The data themselves + - How each measurement was produced + - Units + - Format + - Quality assurance activities + - Precision, accuracy, and uncertainty + +Metadata standards and ontologies are invaluable for supporting data +reuse. 
Metadata standards tell you: + +- Which metadata attributes to include +- How to format your metadata +- What values are allowable for different attributes + +Some metadata standards you may want to consider: + +- [DataCite](https://schema.datacite.org/) for publishing data +- [Dublin Core](http://www.dublincore.org/specifications/dublin-core/dcmi-terms/) for sharing data on the web +- [MIxS](https://press3.mcs.anl.gov/gensc/mixs/) Minimum Information for any (x) sequence +- [OGC standards](https://www.opengeospatial.org/docs/is) for geospatial data + +Ontologies provide standardization for metadata values: + +- Example: [Environment Ontology](http://environmentontology.org/) terms for the MIxS standards +- Example: [Plant Ontology](http://planteome.org/) for plant tissue types or development stages +- [FAIRSharing.org](https://fairsharing.org/) lists standards and ontologies for life sciences. + +The CyVerse Data Commons supports good data description through: + +- Metadata templates +- Bulk metadata upload +- Automatic collection of analysis parameters, inputs, and outputs in the DE. + +**Preserve** + +In general, data must be preserved in an appropriate long-term archive +(i.e. data center). Here are some examples: + +- Sequence data should go to a national repository, frequently [NCBI](https://www.ncbi.nlm.nih.gov/) +- Identify data with value - it may not be necessary to preserve all data from a project +- The CyVerse [Data Commons](http://datacommons.cyverse.org/) provides a place to publish and preserve data that was generated on or can be used in CyVerse, where no other repository exists. 
+- See lists of repositories at [FAIRSharing.org](https://fairsharing.org/) +- See lists of repositories at [Data Dryad](https://datadryad.org/stash) +- Github repos can get DOIs through [Zenodo](https://guides.github.com/activities/citable-code/) +- Be aware of licensing and other intellectual property issues + - Repositories will require some kind of license, often the + least restrictive (see for example [Creative Commons](https://creativecommons.org/)) + - Repositories are unlikely to enforce reuse restrictions, even + if you apply them. + +**Discover** + +- Good metadata allows you to discover your own data! +- Databases, repositories, and search indices provide ways to + discover relevant data for reuse + - [Google dataset search](https://toolbox.google.com/datasetsearch) + - [DataOne](https://www.dataone.org/) + - [FAIRSharing.org](https://fairsharing.org/) + +**Integrate** + +- Data integration is a lot of work +- Standards and ontologies are key to future data integration +- Know the data before you integrate them +- Don't trust that two columns with the same header are the same data +- Properly cite the data you reuse! +- Use DOIs ([Digital Object Identifiers](https://en.wikipedia.org/wiki/Digital_object_identifier)) wherever possible + +**Analyze** + +- Follow open science principles for reproducible analyses (CyVerse, + RStudio, notebooks, IDEs) +- State your hypotheses and analysis workflow before collecting + data. Tools like [Open Science Framework](https://osf.io/) (OSF) allow you to make this public. +- Record all software, parameters, inputs, etc. + +--- + +### References and Resources + +[DataOne best practices](https://dataoneorg.github.io/Education/bestpractices/) + +[Center for Open Science](https://cos.io/) + +--- + +## FAIR Data + +!!! 
Info "Learning Objectives" + - Recall the meaning of FAIR + - Understand why FAIR is a collection of principles (rather than rules) + - Use self-assessments to evaluate the FAIRness of your data + +### FAIR Principles + +In 2016, the [FAIR Guiding Principles](https://www.nature.com/articles/sdata201618) for scientific data management and stewardship were +published in Scientific Data. Read it. + +**Findable** + +- F1. (meta)data are assigned a globally unique and persistent identifier +- F2. data are described with rich metadata (defined by R1 below) +- F3. metadata clearly and explicitly include the identifier of the data it describes +- F4. (meta)data are registered or indexed in a searchable resource + +**Accessible** + +- A1. (meta)data are retrievable by their identifier using a + standardized communications protocol +- A1.1 the protocol is open, free, and universally implementable +- A1.2 the protocol allows for an authentication and authorization + procedure, where necessary +- A2. metadata are accessible, even when the data are no longer + available + +**Interoperable** + +- I1. (meta)data use a formal, accessible, shared, and broadly + applicable language for knowledge representation. +- I2. (meta)data use vocabularies that follow FAIR principles +- I3. (meta)data include qualified references to other (meta)data + +**Reusable** + +- R1. meta(data) are richly described with a plurality of accurate + and relevant attributes +- R1.1. (meta)data are released with a clear and accessible data + usage license +- R1.2. (meta)data are associated with detailed provenance +- R1.3. (meta)data meet domain-relevant community standard + +!!! Tip + Open vs. Public vs. FAIR: + + FAIR does not demand that data be open: See one definition of open: + http://opendefinition.org/ + +!!! Tip + **Why Principles?** + + FAIR is a collection of principles. 
Ultimately, different + communities within different scientific disciplines must work to + interpret and implement these principles. Because technologies + change quickly, focusing on the desired end result allows FAIR to be + applied to a variety of situations now and in the foreseeable + future. + +### CARE Principles + +The [CARE Principles](https://www.gida-global.org/care) for Indigenous Data Governance were drafted at the International Data Week and Research Data Alliance Plenary co-hosted event "Indigenous Data Sovereignty Principles for the Governance of Indigenous Data Workshop," 8 November 2018, Gaborone, Botswana. + +**Collective Benefit** + +- C1. For inclusive development and innovation +- C2. For improved governance and citizen engagement +- C3. For equitable outcomes + +**Authority to Control** + +- A1. Recognizing rights and interests +- A2. Data for governance +- A3. Governance of data + +**Responsibility** + +- R1. For positive relationships +- R2. For expanding capability and capacity +- R3. For Indigenous languages and worldviews + +**Ethics** + +- E1. For minimizing harm and maximizing benefit +- E2. For justice +- E3. For future use + +### FAIR - TLC + +**Traceable, Licensed, and Connected** + +- The need for metrics: https://zenodo.org/record/203295#.XkrzTxNKjzI + +### How to get to FAIR? + +This is a question that only you can answer, that is because it depends +on (among other things) + +1. Your scientific discipline: Your datatypes and existing standards + for what constitutes acceptable data management will vary. +2. The extent to which your scientific community has implemented + FAIR: Some disciplines have significant guidelines on FAIR, while + others have not addressed the subject in any concerted way. +3. Your level of technical skills: Some approaches to implementing + FAIR may require technical skills you may not yet feel comfortable + with. 
+ +While a lot is up to you, the first step is to evaluate how FAIR you +think your data are: + +!!! Question "Exercise" + Thinking about a dataset you work with, complete the ARDC [FAIR assessment](https://ardc.edu.au/resource/fair-data-self-assessment-tool/). + +--- + +### References and Resources + + + +--- + +## Data Management Plans + +!!! Info "Learning Objectives" + - Describe the purpose of a data management plan + - Describe the important elements of a data management plan + - Use a self-assessment to design a data management plan + +### What is a DMP? + +"A data management plan or DMP is a formal document that outlines how +data are to be handled both during a research project, and after the +project is completed. [1] The goal of a data management plan is to +consider the many aspects of data management, metadata generation, data +preservation, and analysis before the project begins; this may lead to +data being well-managed in the present, and prepared for preservation in +the future." + +(Source: https://en.wikipedia.org/wiki/Data_management_plan) + +[Example DMP](https://github.com/CyVerse-learning-materials/foss/blob/main/slides/DMPFuTRES_v2.docx) + + +### Today's Guest Speaker + +
+ ![Image title](/assets/wade_bishop.jpg){ width="200" } +
Dr. Wade Bishop, Professor in the School of Information Sciences at University of Tennessee, Knoxville
+
+ + +Dr. Bishop's [article](https://datascience.codata.org/articles/10.5334/dsj-2023-027){target=_blank} on DMPs + +--- + +**Why bother with a DMP?** + +!!! Question "How would you answer?" + Do you have a data management plan? If so, how do you use it? + +*"Those who fail to plan, plan to fail"* + +Returning to the assertion that data (and its value) is at the +foundation of your science, working without a data management plan +should be considered scientific misconduct. + +Those are strong words. And while we might have an intuition of the +boundaries of research ethics - data mismanagement seems more like an +annoyance than misconduct. However, if your mismanagement leads to error +in your research data, or the inability to make publicly-funded research +open to the public, these are serious consequences. Increasingly, +funders realize this. + +**Stick:** + +- [You have to make one](https://www.nsf.gov/pubs/2019/nsf19069/nsf19069.jsp) +- Reviewers definitely look at them, but they may not be enforced. 
+ +**Carrot:** + +- Make your life easier +- Planning for you project makes it run more smoothly +- Avoid surprise costs + +### Elements of a good DMP + +- Information about data & data format(s) + - data types + - data sources + - analysis methods + - formats + - QA/QC + - version control + - **data life cycle** + +- Metadata content and format(s) + - format + - standards + +- Policies for access, sharing, and re-use + - funder obligations + - ethical and privacy issues (data justice) + - intellectual property, copyright, citation + - timeline for releases + +- Long-term storage, data management, and preservation + - which data to preserve + - which archive/repository + +- Budget([PAPPG](https://www.nsf.gov/pubs/policydocs/pappg19_1/pappg_2.jsp#IIC2gvib)) + - each of the above elements cost time/money + - Personnel time for data preparation, management, + documentation, and preservation (including time) + - Hardware and/or software for data management, back up, + security, documentation, and preservation (including time) + - Publication/archiving costs (including time) + +Not only what, but *who* (roles). + +Extra challenges for collaborative projects. + +### Machine actionable DMPs + +- DMPs describe research methods that will evolve over the course of a project +- to be a useful tool for researchers and others, the content must + be updated to capture the methods that are employed and the data + that are produced + +![maDMP](assets/journal.pcbi.1006750.g002.png) + +(Source: https://doi.org/10.1371/journal.pcbi.1006750.g002) + +### Tools for DMPs + +!!! Question "Exercise" + Thinking about a dataset you work with, complete the [Data Stewardship Wizard](https://ds-wizard.org/). + +--- + +## Licenses + +By default, when you make creative work, that work is under exclusive copyright. This means that you have the right to decide how your work is used, and that others must ask your permission to use your work. 
+ +If you want your work to be Open and used by others, you need to specify how others can use your work. This is done by *licensing* your work. + +[MIT License](https://choosealicense.com/licenses/mit/{target=_blank}) + +[GNU General Public License v3.0](https://choosealicense.com/licenses/gpl-3.0/){target=_blank} + + +FOSS material has been licensed using the [Creative Commons Attribution 4.0 International License](https://creativecommons.org/licenses/by/4.0/){target=_blank}. +
+
+ +Licensing your [Github Repository](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/licensing-a-repository#choosing-the-right-license){target=_blank} + +* Apache License 2.0 +* GNU General Public License v3.0 +* MIT License +* BSD 2-Clause "Simplified" License +* BSD 3-Clause "New" or "Revised" License +* Boost Software License 1.0 +* Creative Commons Zero v1.0 Universal +* Eclipse Public License 2.0 +* GNU Affero General Public License v3.0 +* GNU General Public License v2.0 +* GNU Lesser General Public License v2.1 +* Mozilla Public License 2.0 +* The Unlicense + +
+
+ +Open Source Licensing Resources + +https://choosealicense.com/ + +https://opensource.guide/legal/ + + +--- + +### References and Resources + +- [NSF Guidelines on DMPs](https://www.nsf.gov/bio/biodmp.jsp) +- https://dmptool.org/general_guidance +- https://dmptool.org/public_templates +- Professional and scholarly societies, e.g., theEcological Society of America http://www.esa.org/esa/science/data-sharing/resources-and-tools/ +- DataOne - https://dataoneorg.github.io/Education/bestpractices/ +- Data Carpentry - http://datacarpentry.org/ +- The US Geological Survey https://www.usgs.gov/data-management +- Repository registry (and search) service: http://www.re3data.org/ +- Your university library + +---- + +## Self Assessment + +??? Question "What is a Data Management Plan?" + + **Important**: A data management plan (DMP) is now required aspect of publicly funded research. + + DMPs are short, formal, documents outlining what types of data will be used, and what will be done with the data both during and after a research project concludes. + +??? Question "True or False: When science project funding ends, the data should end with it" + + !!! Success "False" + + Data live on after a project ends. + + Ensuring that data have a full lifecycle where they can be (re)hosted and made available after a project ends is critical to open science and reproducible research + + !!! Danger "Maybe" + + Sometimes destroying data is part of the life cycle of data - this may be required if data are sensitive and could be used unethically in the future, beyond the control of the original investigator team. + +??? Question "True or False: FAIR and CARE data principles are the same" + + !!! Success "False" + + The CARE principles were created in order to help guide and answer when and how applying FAIR data principles to soverign indigenous-controlled data should be done and when it should not. 
+ diff --git a/03_managing_data/index.html b/03_managing_data/index.html new file mode 100644 index 000000000..3e961be3a --- /dev/null +++ b/03_managing_data/index.html @@ -0,0 +1,1932 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 2. Managing Data - CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Managing Data

+
+

Learning Objectives

+

After this lesson, you should be able to:

+
    +
  • Recognize data as the foundation of open science and be able to describe the "life cycle of data"
  • +
  • Use self-assessments to evaluate your current data management practices
  • +
  • Cite tools and resources to improve your data management practices
  • +
  • Know the biggest challenge to effective data management
  • +
+
+

Why should you care about data management?

+

Ensuring that data are effectively organized, shared, and preserved +is critical to making your science impactful, efficient, and open.

+
+Warning +

The biggest challenge to data management is making it an afterthought.

+

Unfortunately, poor data management doesn't have a high upfront cost. +You can do substantial work before realizing you are in trouble. Like +a swimmer in a rip current, by the time you realize you are in trouble, +you may already be close to drowning.

+

The solution? Make data management the first thing you consider when +starting a research project. It also needs to be a policy you +institute right away for your research group.

+
+
+

How would you answer?

+
- If you give your data to a colleague who has not been involved with your project, would they be able to make sense of it? Would they be able to use it properly?
+- If you come back to your own data in five years, will you be able to make sense of it? Will you be able to use it properly?
+- When you are ready to publish a paper, is it easy to find all the correct versions of all the data you used and present them in a comprehensible manner?
+
+
+

Well-managed Data Sets:

+
    +
  • Make life much easier for you and your collaborators
  • +
  • Benefit the scientific research community by allowing others to reuse your data
  • +
  • Are required by most funders and many journals
  • +
  • Recent Dear Colleague letter from NSF
  • +
  • NSF proposal preparation guidelines
  • +
+
+

Data Self-assessment

+

Part I: Basic questions

+

Here are some questions about how you manage and work with data.

+
+

Activity

+
In small groups, discuss the following questions. You will be provided with a space for documenting our shared answers.
+
+1\. What are the two or three data types that you most frequently work with?
+    -   Think about the sources (observational, experimental, simulated, compiled/derived)
+    -   Also consider the formats (tabular, sequence, database, image, etc.)
+
+2\.  What is the scale of your data?
+
+??? Tip
+    We often talk about the scale of data using the ["Three V's"](https://www.bigdataframework.org/four-vs-of-big-data/):
+
+    -   Volume: Size of the data (MBs, GBs, TBs); can also include how many files (e.g dozens of big files, or millions of small ones)
+    -   Velocity: How quickly are these data produced and analyzed? A lot coming in a single batch infrequently, or, a constant small amount of data that must be rapidly analyzed?
+    -   Variety: How many different data types (raw files? databases?)
+    A fourth V (Veracity) captures the need to make decisions about data processing (i.e., separating low- and high-quality data)
+
+3\.  What is your strategy for storing and backing up your data?
+
+4\.  What is your strategy for verifying the integrity of your data? (i.e. verifying that your data has not been altered)
+
+5\.  What is your strategy for searching your data?
+
+6\.  What is your strategy for sharing (and getting credit for) your data? (i.e. How will do you share with your community/clients? How is that sharing documented? How do you evaluate the impact of data shared? )
+
+
+
+

Data Management Basics

+

Let's learn a little more about data so that we can evaluate your +self-assessment responses.

+

Data Types

+

Different types of data require different management practices. What are +some data types and sources you might use in your work? (Adapted from +DMP Tool Data management general guidance)

+

Data Types

+
    +
  • Text: field or laboratory notes, survey responses
  • +
  • Numeric: tables, counts, measurements
  • +
  • Audiovisual: images, sound recordings, video
  • +
  • Models, computer code
  • +
  • Discipline-specific: FASTA in biology, FITS in astronomy, CIF in chemistry
  • +
  • Instrument-specific: equipment outputs
  • +
+

Data Sources

+

Observational

+
    +
  • Captured in real-time, typically outside the lab
  • +
  • Usually irreplaceable and therefore the most important to safeguard
  • +
  • Examples: Sensor readings, telemetry, survey results, images
  • +
+

Experimental

+
    +
  • Typically generated in the lab or under controlled conditions
  • +
  • Often reproducible, but can be expensive or time-consuming
  • +
  • Examples: gene sequences, chromatograms, magnetic field readings
  • +
+

Simulation

+
    +
  • Machine generated from test models
  • +
  • Likely to be reproducible if the model and inputs are preserved
  • +
  • Examples: climate models, economic models
  • +
+

Derived / Compiled

+
    +
  • Generated from existing datasets
  • +
  • Reproducible, but can be very expensive and time-consuming
  • +
  • Examples: text and data mining, compiled database, 3D models
  • +
+
+
+

Tip

+
**The Data Life Cycle**
+
+*Data management* is the set of practices that allow researchers to
+effectively and efficiently handle data throughout the data life
+cycle. Although typically shown as a circle (below) the actually
+life cycle of any data item may follow a different path, with
+branches and internal loops. Being aware of your data's future
+helps you plan how to best manage them.
+
+![lifecycle](assets/data_life_cycle.png)
+
+Image from [Strasser *et al*](https://www.dataone.org/sites/all/documents/DataONE_BP_Primer_020212.pdf).
+
+
+

Best practices for the data life cycle

+

The summary below is adapted from the excellent DataONE best practices primer.

+

Plan

+
    +
  • Describe the data that will be compiled, and how the data will be managed and made accessible throughout its lifetime
  • +
  • A good plan considers each of the stages below
  • +
+
+Warning +

The biggest challenge to data management is making it an afterthought.

+

Unfortunately, poor data management doesn't have a high upfront cost. +You can do substantial work before realizing you are in trouble. Like +a swimmer in a rip current, by the time you realize you are in trouble, +you may already be close to drowning.

+

The solution? Make data management the first thing you consider when +starting a research project. It also needs to be a policy you +institute right away for your research group.

+
+

Collect

+
    +
  • Have a plan for data organization in place before collecting data
  • +
  • Collect and store observation metadata at the same time you collect the data
  • +
  • Take advantage of machine generated metadata
  • +
+

Assure

+
    +
  • Record any conditions during collection that might affect the quality of the data
  • +
  • Distinguish estimated values from measured values
  • +
  • Double check any data entered by hand
  • +
  • Perform statistical and graphical summaries (e.g., max/min, average, range) to check for questionable or impossible values.
  • +
  • Mark data quality, outliers, missing values, etc.
  • +
+

Describe:

+
    +
  • +

    Comprehensive data documentation (i.e. metadata) is the key to + future understanding of data. Without a thorough description of + the context of the data, the context in which they were collected, + the measurements that were made, and the quality of the data, it + is unlikely that the data can be easily discovered, understood, or + effectively used.

    +
  • +
  • +

    Organize your data for publication. Before you can describe your + data, you must decide how to organize them. This should be planned + before hand, so that data organization is a minimal task at the + time of publication.

    +
  • +
  • +

    Thoroughly describe the dataset (e.g., name of dataset, list of + files, date(s) created or modified, related datasets) including + the people and organizations involved in data collection (e.g., + authors, affiliations, sponsor). Also include:

    +
      +
    • An ORCID (obtain one if you don't have one).
    • +
    • The scientific context (reason for collecting the data, how they were collected, equipment and software used to generate the data, conditions during data collection, spatial and temporal resolution)
    • +
    • The data themselves
    • +
    • How each measurement was produced
    • +
    • Units
    • +
    • Format
    • +
    • Quality assurance activities
    • +
    • Precision, accuracy, and uncertainty
    • +
    +
  • +
+

Metadata standards and ontologies are invaluable for supporting data +reuse. Metadata standards tell you:

+
    +
  • Which metadata attributes to include
  • +
  • How to format your metadata
  • +
  • What values are allowable for different attributes
  • +
+

Some metadata standards you may want to consider:

+ +

Ontologies provide standardization for metadata values:

+ +

The CyVerse Data Commons supports good data description through:

+
    +
  • Metadata templates
  • +
  • Bulk metadata upload
  • +
  • Automatic collection of analysis parameters, inputs, and outputs in the DE.
  • +
+

Preserve

+

In general, data must be preserved in an appropriate long-term archive +(i.e. data center). Here are some examples:

+
    +
  • Sequence data should go to a national repository, frequently NCBI
  • +
  • Identify data with value - it may not be necessary to preserve all data from a project
  • +
  • The CyVerse Data Commons provides a place to publish and preserve data that was generated on or can be used in CyVerse, where no other repository exists.
  • +
  • See lists of repositories at FAIRSharing.org
  • +
  • See lists of repositories at Data Dryad
  • +
  • Github repos can get DOIs through Zenodo
  • +
  • Be aware of licensing and other intellectual property issues
      +
    • Repositories will require some kind of license, often the + least restrictive (see for example Creative Commons)
    • +
    • Repositories are unlikely to enforce reuse restrictions, even + if you apply them.
    • +
    +
  • +
+

Discover

+ +

Integrate

+
    +
  • Data integration is a lot of work
  • +
  • Standards and ontologies are key to future data integration
  • +
  • Know the data before you integrate them
  • +
  • Don't trust that two columns with the same header are the same data
  • +
  • Properly cite the data you reuse!
  • +
  • Use DOIs (Digital Object Identifiers) wherever possible
  • +
+

Analyze

+
    +
  • Follow open science principles for reproducible analyses (CyVerse, + RStudio, notebooks, IDEs)
  • +
  • State your hypotheses and analysis workflow before collecting + data. Tools like Open Science Framework (OSF) allow you to make this public.
  • +
  • Record all software, parameters, inputs, etc.
  • +
+
+

References and Resources

+

DataOne best practices

+

Center for Open Science

+
+

FAIR Data

+
+

Learning Objectives

+
-   Recall the meaning of FAIR
+-   Understand why FAIR is a collection of principles (rather than rules)
+-   Use self-assessments to evaluate the FAIRness of your data
+
+
+

FAIR Principles

+

In 2016, the FAIR Guiding Principles for scientific data management and stewardship were +published in Scientific Data. Read it.

+

Findable

+
    +
  • F1. (meta)data are assigned a globally unique and persistent identifier
  • +
  • F2. data are described with rich metadata (defined by R1 below)
  • +
  • F3. metadata clearly and explicitly include the identifier of the data it describes
  • +
  • F4. (meta)data are registered or indexed in a searchable resource
  • +
+

Accessible

+
    +
  • A1. (meta)data are retrievable by their identifier using a + standardized communications protocol
  • +
  • A1.1 the protocol is open, free, and universally implementable
  • +
  • A1.2 the protocol allows for an authentication and authorization + procedure, where necessary
  • +
  • A2. metadata are accessible, even when the data are no longer + available
  • +
+

Interoperable

+
    +
  • I1. (meta)data use a formal, accessible, shared, and broadly + applicable language for knowledge representation.
  • +
  • I2. (meta)data use vocabularies that follow FAIR principles
  • +
  • I3. (meta)data include qualified references to other (meta)data
  • +
+

Reusable

+
    +
  • R1. meta(data) are richly described with a plurality of accurate + and relevant attributes
  • +
  • R1.1. (meta)data are released with a clear and accessible data + usage license
  • +
  • R1.2. (meta)data are associated with detailed provenance
  • +
  • R1.3. (meta)data meet domain-relevant community standards
  • +
+
+

Tip

+
Open vs. Public vs. FAIR:
+
+FAIR does not demand that data be open: See one definition of open:
+http://opendefinition.org/
+
+
+
+

Tip

+
**Why Principles?**
+
+FAIR is a collection of principles. Ultimately, different
+communities within different scientific disciplines must work to
+interpret and implement these principles. Because technologies
+change quickly, focusing on the desired end result allows FAIR to be
+applied to a variety of situations now and in the foreseeable
+future.
+
+
+

CARE Principles

+

The CARE Principles for Indigenous Data Governance were drafted at the International Data Week and Research Data Alliance Plenary co-hosted event "Indigenous Data Sovereignty Principles for the Governance of Indigenous Data Workshop," 8 November 2018, Gaborone, Botswana.

+

Collective Benefit

+
    +
  • C1. For inclusive development and innovation
  • +
  • C2. For improved governance and citizen engagement
  • +
  • C3. For equitable outcomes
  • +
+

Authority to Control

+
    +
  • A1. Recognizing rights and interests
  • +
  • A2. Data for governance
  • +
  • A3. Governance of data
  • +
+

Responsibility

+
    +
  • R1. For positive relationships
  • +
  • R2. For expanding capability and capacity
  • +
  • R3. For Indigenous languages and worldviews
  • +
+

Ethics

+
    +
  • E1. For minimizing harm and maximizing benefit
  • +
  • E2. For justice
  • +
  • E3. For future use
  • +
+

FAIR - TLC

+

Traceable, Licensed, and Connected

+ +

How to get to FAIR?

+

This is a question that only you can answer, that is because it depends +on (among other things)

+
    +
  1. Your scientific discipline: Your datatypes and existing standards + for what constitutes acceptable data management will vary.
  2. +
  3. The extent to which your scientific community has implemented + FAIR: Some disciplines have significant guidelines on FAIR, while + others have not addressed the subject in any concerted way.
  4. +
  5. Your level of technical skills: Some approaches to implementing + FAIR may require technical skills you may not yet feel comfortable + with.
  6. +
+

While a lot is up to you, the first step is to evaluate how FAIR you +think your data are:

+
+

Exercise

+
Thinking about a dataset you work with, complete the ARDC [FAIR assessment](https://ardc.edu.au/resource/fair-data-self-assessment-tool/).
+
+
+
+

References and Resources

+

https://www.nature.com/articles/sdata201618

+
+

Data Management Plans

+
+

Learning Objectives

+
-   Describe the purpose of a data management plan
+-   Describe the important elements of a data management plan
+-   Use a self-assessment to design a data management plan
+
+
+

What is a DMP?

+

"A data management plan or DMP is a formal document that outlines how +data are to be handled both during a research project, and after the +project is completed. [1] The goal of a data management plan is to +consider the many aspects of data management, metadata generation, data +preservation, and analysis before the project begins; this may lead to +data being well-managed in the present, and prepared for preservation in +the future."

+

(Source: https://en.wikipedia.org/wiki/Data_management_plan)

+

Example DMP

+

Today's Guest Speaker

+
+

Image title +

+
Dr. Wade Bishop, Professor in the School of Information Sciences at University of Tennessee, Knoxville
+
+

Dr. Bishop's article on DMPs

+
+

Why bother with a DMP?

+
+

How would you answer?

+
Do you have a data management plan? If so, how do you use it?
+
+
+

"Those who fail to plan, plan to fail"

+

Returning to the assertion that data (and its value) is at the +foundation of your science, working without a data management plan +should be considered scientific misconduct.

+

Those are strong words. And while we might have an intuition of the +boundaries of research ethics - data mismanagement seems more like an +annoyance than misconduct. However, if your mismanagement leads to error +in your research data, or the inability to make publicly-funded research +open to the public, these are serious consequences. Increasingly, +funders realize this.

+

Stick:

+ +

Carrot:

+
    +
  • Make your life easier
  • +
  • Planning for your project makes it run more smoothly
  • +
  • Avoid surprise costs
  • +
+

Elements of a good DMP

+
    +
  • +

    Information about data & data format(s)

    +
      +
    • data types
    • +
    • data sources
    • +
    • analysis methods
    • +
    • formats
    • +
    • QA/QC
    • +
    • version control
    • +
    • data life cycle
    • +
    +
  • +
  • +

    Metadata content and format(s)

    +
      +
    • format
    • +
    • standards
    • +
    +
  • +
  • +

    Policies for access, sharing, and re-use

    +
      +
    • funder obligations
    • +
    • ethical and privacy issues (data justice)
    • +
    • intellectual property, copyright, citation
    • +
    • timeline for releases
    • +
    +
  • +
  • +

    Long-term storage, data management, and preservation

    +
      +
    • which data to preserve
    • +
    • which archive/repository
    • +
    +
  • +
  • +

    Budget(PAPPG)

    +
      +
    • each of the above elements cost time/money
    • +
    • Personnel time for data preparation, management, + documentation, and preservation (including time)
    • +
    • Hardware and/or software for data management, back up, + security, documentation, and preservation (including time)
    • +
    • Publication/archiving costs (including time)
    • +
    +
  • +
+

Not only what, but who (roles).

+

Extra challenges for collaborative projects.

+

Machine actionable DMPs

+
    +
  • DMPs describe research methods that will evolve over the course of a project
  • +
  • to be a useful tool for researchers and others, the content must + be updated to capture the methods that are employed and the data + that are produced
  • +
+

maDMP

+

(Source: https://doi.org/10.1371/journal.pcbi.1006750.g002)

+

Tools for DMPs

+
+

Exercise

+
Thinking about a dataset you work with, complete the [Data Stewardship Wizard](https://ds-wizard.org/).
+
+
+
+

Licenses

+

By default, when you make creative work, that work is under exclusive copyright. This means that you have the right to decide how your work is used, and that others must ask your permission to use your work.

+

If you want your work to be Open and used by others, you need to specify how others can use your work. This is done by licensing your work.

+

MIT License

+

GNU General Public License v3.0

+

FOSS material has been licensed using the Creative Commons Attribution 4.0 International License. +
+

+

Licensing your Github Repository

+
    +
  • Apache License 2.0
  • +
  • GNU General Public License v3.0
  • +
  • MIT License
  • +
  • BSD 2-Clause "Simplified" License
  • +
  • BSD 3-Clause "New" or "Revised" License
  • +
  • Boost Software License 1.0
  • +
  • Creative Commons Zero v1.0 Universal
  • +
  • Eclipse Public License 2.0
  • +
  • GNU Affero General Public License v3.0
  • +
  • GNU General Public License v2.0
  • +
  • GNU Lesser General Public License v2.1
  • +
  • Mozilla Public License 2.0
  • +
  • The Unlicense
  • +
+


+

+

Open Source Licensing Resources

+

https://choosealicense.com/

+

https://opensource.guide/legal/

+
+

References and Resources

+ +
+

Self Assessment

+
+What is a Data Management Plan? +

Important: A data management plan (DMP) is now a required aspect of publicly funded research.

+

DMPs are short, formal, documents outlining what types of data will be used, and what will be done with the data both during and after a research project concludes.

+
+
+True or False: When science project funding ends, the data should end with it +
+

False

+

Data live on after a project ends.

+

Ensuring that data have a full lifecycle where they can be (re)hosted and made available after a project ends is critical to open science and reproducible research

+
+
+

Maybe

+

Sometimes destroying data is part of the life cycle of data - this may be required if data are sensitive and could be used unethically in the future, beyond the control of the original investigator team.

+
+
+
+True or False: FAIR and CARE data principles are the same +
+

False

+

The CARE principles were created in order to help guide and answer when and how applying FAIR data principles to sovereign indigenous-controlled data should be done and when it should not.

+
+
+ +
+
+ + + Last update: + 2023-10-18 + + +
+ + + + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/04_documentation_communication/04_documentation_communication.md b/04_documentation_communication/04_documentation_communication.md new file mode 100644 index 000000000..04d2c595d --- /dev/null +++ b/04_documentation_communication/04_documentation_communication.md @@ -0,0 +1,631 @@ +# :material-file-document-multiple: Documentation & :material-antenna: Communication + +!!! Success "Learning Objectives" + + After this lesson, you should be able to: + + * Identify and explain different types of project documentation (both internal and external) + * Describe tools and approaches to creating your own documentation + * Describe best practices for maintaining documentation + * Identify and explain different communication strategies for working in a team (virtual and in person) + * Create your own GitHub Pages website (!) + +Peer-reviewed manuscripts or conference preceedings / presentations /posters are one of the primary ways of communicating science, but they are far from the only avenues of communcation available to us as researchers and educators. + +As our methods become more complicated and customized, open science means giving people a better understanding of our approaches and tools than may be required in most journals. + +Communicating amongst a team of researchers that may span institutions, time zones, or continents also requires more modern approaches. + +Strong frameworks for internal communication and documentation can make collaboration easier, improve the quality of your science, and reduce the hassle of collaborative work. + +## :material-file-document-multiple: Project Documentation + +Documentation is not only the practice of recording, preserving, and organizing information, data, or details in a structured and systematic manner. Documentation is essentially the ability to communicate with your future self, or your collaborators, or the world specific ideas and information. 
Effective documentation must take into consideration the following points: + +- **:material-glasses: Clarity**: Documentation should be easy to understand with clear language and no ambiguity. +- **:octicons-circle-16: Completeness**: It must cover all essential details, leaving nothing crucial undocumented. +- **:fontawesome-solid-bullseye: Accuracy**: Information should be up-to-date and correct to prevent errors and misunderstandings. +- **:simple-instructure: Organization**: A logical structure and clear organization make it easy to navigate and find information. +- **:fontawesome-solid-exclamation: Relevance**: Documentation should focus on what's pertinent to its intended audience or purpose, avoiding unnecessary information. + +!!! info "We've already covered many best practices regarding project and data management, topics which are very much intertwined with documentation. Here are some things to keep in mind when addressing documentation:" + - **Metadata**: Implement standardized metadata formats for research outputs to enhance findability. + - **Data Repositories**: Deposit research data and materials in trusted data repositories that adhere to FAIR principles, making them easily accessible and reusable. + - **Persistent Identifiers**: Assign persistent identifiers (e.g., DOIs, ORCIDs) to datasets, publications, and researchers, ensuring their long-term accessibility and recognition. + - **Templates**: Create and utilize documentation templates for research datasets, methods, and software to ensure uniform and comprehensive information capture. *It saves time!!* + +Not all documentation is the same. **The documentation system**, by Divio, categorizes the different types of documentation into 4 quadrants: + +
+ ![xkcd](https://documentation.divio.com/_images/overview.png) +
Read more in depth on the documentation system here: https://documentation.divio.com
+
+
+??? Question "Explaining the quadrants"
+
+    - **Tutorials**: Lessons! Tutorials are lessons that take the reader by the hand through a series of steps to complete a project of some kind. They are what your project needs in order to show a beginner that they can achieve something with it.
+    - **How-to-guides**: Recipes! How-to-guides take the reader through the steps required to solve a real-world problem.
+    - **References**: Guides! References offer technical descriptions of the machinery and how to operate it. References have one job only: to describe. They are code-determined, because ultimately that’s what they describe: key classes, functions, APIs, and so they should list things like functions, fields, attributes and methods, and set out how to use them.
+    - **Explanation**: Discussions! The aims of explanations are to clarify and illuminate a particular topic by broadening the documentation’s coverage of a topic.
+
+!!! tip "Furthermore, one of the main issues with documentation usually becomes keeping documents up-to-date. Here are some tips one can keep in mind to address the issue:"
+
+    - **Data Lifecycle Planning**: Develop a clear data management and documentation plan at the outset of the research project to ensure consistency and continuity.
+    - **Version Control**: Use version control systems like Git for code and documentation to track changes, facilitate collaboration, and maintain a history of updates.
+    - **Workflows and Automation**: Explore automation tools for documentation, such as generating metadata from data headers or embedding documentation within code using tools like [GitHub pages](https://pages.github.com/), [Sphinx](https://www.sphinx-doc.org/en/master/) and [LaTeX](https://www.latex-project.org/).
+ - **Collaborative Documentation Platforms**: Employ collaborative platforms like [Overleaf](https://www.overleaf.com/) or [Google Docs](https://www.google.com/docs/about/) to enable multiple researchers to contribute to and update documentation. + - **Documentation Reviews**: Schedule regular documentation reviews within research teams to identify gaps or outdated information and address them promptly. + +### Public Repositories for Documentation + +This website is rendered using [:simple-github: GitHub Pages](https://pages.github.com/){target=_blank} using [:simple-markdown: MkDocs](https://www.mkdocs.org/){target=_blank} and the [Material](https://squidfunk.github.io/mkdocs-material/){target=_blank} theme for MkDocs. + +Other popular website generators for GitHub Pages are [:simple-jekyll: Jekyll Theme](https://jekyllrb.com/){target=_blank} or [:simple-bootstrap: Bootstrap.js](https://getbootstrap.com/){target=_blank}. + +[:simple-readthedocs: ReadTheDocs.org](https://readthedocs.org/){target=_blank} has become a popular tool for developing web-based documentation. Think of RTD as "Continuous Documentation". + +[:material-book-arrow-down: Bookdown](https://bookdown.org/){target=_blank} is an open-source R package that facilitates writing books and long-form articles/reports with R Markdown. + +[:simple-r: Quarto](https://quarto.org/){target=_blank} is an open-source scientific and technical publishing system built on Pandoc + +[:simple-confluence: Confluence Wikis (CyVerse)](https://wiki.cyverse.org){target=_blank} are another tool +for documenting your workflow. + +!!! Quote "Things to remember about Documentation" + + - Documentation should be written in such a way that people who did not write the documentation can read and then use or read and then teach others in the applications of the material. 
+ + - Documentation is best treated as a living document, but version control is necessary to maintain it + + - Technology changes over time, expect to refresh documentation every 3-5 years as your projects age and progress. + +*:simple-github: GitHub Pages* + +- You can pull templates from other GitHub users for your website, + e.g. [:simple-jekyll: Jekyll themes](http://themes.jekyllrc.org/){target=_blank} +- GitHub pages are free, fast, and easy to build, but limited in use + of subdomain or URLs. + +*:simple-readthedocs: ReadTheDocs* + +- publishing websites via + [ReadTheDocs.com](https://readthedocs.com/dashboard/){target=_blank} costs money. +- You can work in an offline state, where you develop the materials + and publish them to your localhost using + [Sphinx](https://docs.readthedocs.io/en/stable/intro/getting-started-with-sphinx.html){target=_blank} +- You can work on a website template in a GitHub repository, and + pushes are updated in near real time using ReadTheDocs.com. + +*:simple-markdown: Material MkDocs* + +- publish via GitHub Actions +- Uses open source Material or ReadTheDocs Themes + +*:material-book-arrow-down: Bookdown* + +- Bookdown websites can be hosted by [RStudio + Connect](https://www.rstudio.com/products/connect/){target=_blank} +- You can publish a Bookdown website using [Github + Pages](https://github.blog/2016-08-17-simpler-github-pages-publishing/){target=_blank} + +*:simple-r: Quarto* + +- [Build a website](https://quarto.org/docs/websites/) using Quarto's template builder +- [Build with Github Pages](https://quarto.org/docs/publishing/github-pages.html) + +*:simple-jupyter: JupyterBook* + +- [Based on Project Jupyter](https://jupyterbook.org/en/stable/start/overview.html){target=_blank} `ipynb` and MarkDown +- Uses `conda` package management + +*:simple-git: GitBook* + +- [GitBook](https://docs.gitbook.com/){target=_blank} websites use MarkDown syntax +- Free for open source projects, paid plans are available + +### Websites 
to Host Methods & Protocols + +[Open Science Framework](https://osf.io/){target=_blank} for free. OSF can be directly linked to your ORCID. + +- Integrated project management tools +- Uses templates to create a project website +- Can publish preprints from within project management tools + +[Protocols.io](https://www.protocols.io/){target=_blank} - collaborative platform and preprint server for: science methods, computational workflows, clinical trials, operational procedures, safety checklists, and instructions / manuals. + +[QUBES](https://qubeshub.org/){target=_blank} - community of math and biology educators who share resources and methods for preparing students to tackle real, complex, biological problems. + +??? Question "What are the benefits of using a GitHub.io website?" + + [Github Pages](https://pages.github.com/) are hosted directly from your GitHub repository. + + Just edit, push, and your changes are live. + + You do not need to run your own web server!! + +--- + +## :material-antenna: Communication + +### Internal Project + +Choosing which software to use for your internal lab communication can be complicated by the cost of setting up, the cost of maintaining, and simply by the sheer number of platforms that are out there. + +For this workshop, we use [:simple-slack: SLACK](https://slack.com/){target=_blank} (Searchable Log of All Conversation & Knowledge). Microsoft's competitor to SLACK is [:material-microsoft-teams: Microsoft Teams](https://teams.microsoft.com/start){target=_blank}. + +Remember, the intention of these platforms are to **improve productivity** & not become a distraction. + +**:simple-slack: SLACK** + +- Slack has [plenty of apps](https://slack.com/apps){target=_blank} for coordinating + multiple services, i.e. Calendars, Github, GoogleDrive, Box, etc. +- Free Slack is limiting (e.g., 90 day history; limited connections across workspaces). +- Paid Slack is $7.25 per user per month. 
(10 users for 1 year = $870) + +**:material-microsoft-teams: Microsoft Teams** + +- Teams is used by many R1 research universities as part of their + campus wide license agreement for Office 365 Business and Education +- For example, anyone with a `arizona.edu` email address can use Teams for free +- Limitations: + - Not sure you can create your own Teams + - Limited to messaging with people in your university Team + +*Other popular alternatives* + +- [:fontawesome-brands-bandcamp: BaseCamp](https://basecamp.com/){target=_blank} +- [:simple-discord: Discord](https://discordapp.com/){target=_blank} +- [:simple-mastodon: Mastodon](https://joinmastodon.org/){target=_blank} +- [:simple-mattermost: Mattermost](https://mattermost.com/){target=_blank} + +!!! Info "Useful links for creating a SLACK workspace" + 1. [Create a new Workspace](https://get.slack.help/hc/en-us/articles/206845317-Create-a-Slack-workspace){target=_blank} + 2. [Create channels, add apps & tools](https://get.slack.help/hc/en-us/articles/217626298-tips-for-team-creators-and-admins){target=_blank} + +--- + +### External (Public) + +Communicating with the public and other members of your science community (in addition to traditional peer-review publications and conferences) is one of the most important parts of your science! + +There are many ways scientists use social media and the web to share their data science ideas: + +1. [:simple-twitter: "Science Twitter"](https://www.sciencemag.org/news/2018/08/scientists-do-you-want-succeed-twitter-here-s-how-many-followers-you-need){target=_blank} - + is really just regular [Twitter](https://twitter.com/hashtag/science?lang=en){target=_blank}, but with a + focus on following other scientists and organizations, and tweeting + about research you're interested in. By building up a significant + following, more people will know you, know about your work, and + you'll have a higher likelihood of meeting other new collaborators. +2. 
Blogging Platforms such as [Medium](https://medium.com/){target=_blank} are a great place to self publish your writing on just about any topic.
+ It's free to sign up and start blogging, but does have a fee for accessing premium content. Some of my favorite blogs include [Toward Data Science](https://towardsdatascience.com/){target=_blank}
+ and [Chris Holmes](https://medium.com/@cholmes){target=_blank}.
+3. Community groups - There are lists (and lists of lists) of
+ [national research organizations](https://www.google.com/search?q=list+of+professional+science+organizations){target=_blank},
+ in which a researcher can become involved. These older organizations
+ still rely on official websites, science journal blogs, and email
+ lists to communicate with their members. In the earth sciences there
+ are open groups which focus on communication like the [Earth Science
+ Information Partners (ESIP)](https://www.esipfed.org/){target=_blank} with
+ progressive ideas about how data and science can be done. Other
+ groups, like [The Carpentries](https://carpentries.org/){target=_blank} and
+ [Research Bazaar](https://resbazblog.wordpress.com/about/){target=_blank} are
+ focused on data science training and digital literacy.
+4. Podcasts - Creating and distributing audio content to masses is easier than ever before. There are many podcast hosting platforms including [Spotify](https://podcasters.spotify.com/){target=_blank}, [Podbean](https://www.podbean.com/){target=_blank}, [Acast](https://www.acast.com/){target=_blank}, and [Libsyn](https://libsyn.com/){target=_blank}. From there it is simple to make your podcast available in the [Google Podcast](https://rss.com/blog/how-to-submit-podcast-to-google-podcasts/){target=_blank} app or [Apple Podcast](https://transistor.fm/upload-podcast-itunes-apple/){target=_blank} app.
+5.
Webinars - With platforms such as [Zoom](https://zoom.us/){target=_blank}, [Microsoft Teams](https://www.microsoft.com/en-us/microsoft-teams/group-chat-software){target=_blank}, and [Google Meet](https://meet.google.com/){target=_blank}, it is so easy nowadays to host a webinar touting and explaining your science.
+6. Youtube - The king of video sharing platforms is a great place to post content promoting your science (and yourself!). For example, [Cyverse](https://www.youtube.com/@CyverseOrgProject){target=_blank} posts lots of content on cyberinfrastructure and data processing pipelines. Some of my favorite podcasts hosted on Youtube include [StarTalk](https://www.youtube.com/@StarTalk){target=_blank} and [Lex Fridman](https://www.youtube.com/@lexfridman){target=_blank}.
+
+
+!!! Warning "Important"
+    **Remember: Personal and Professional Accounts are Not Isolated**
+
+    You decide what you post on the internet. Your scientist identity may be
+    a part of your personal identity on social media, it might be separate.
+    A future employer or current employer can see your old posts. What you
+    post in your personal accounts can be considered a reflection of the
+    organization you work for and may be used in decisions about hiring or
+    dismissal.
+
+### Addressing Effective Communication
+
+Whether internal or external, communication is important because it serves as the foundation for the exchange of information, ideas, and knowledge, enabling collaboration, understanding, and the advancement of individuals, organizations, and societies. It is therefore fundamental to be able to efficiently communicate, whether it is to promote a piece of scientific advancement, or reaching out to ask for help.
+
+The act of balancing transparency, openness, and ethics with respect for personal data and intellectual property in communication has always been a challenge.
A few methods to address these issues are the following: + +- **:simple-creativecommons: Utilize licensing options:** Researchers can use open licensing mechanisms such as Creative Commons licenses to specify the terms under which their work can be shared, modified, and reused, balancing openness with protection. +- **:fontawesome-solid-eye-low-vision: Data anonymization:** To address privacy and ethical concerns, researchers can anonymize sensitive data before sharing it, allowing for openness without compromising privacy. +- **:fontawesome-regular-handshake: Collaborative agreements:** Collaborative research agreements and partnerships can define expectations regarding data sharing, authorship, and intellectual property, ensuring transparency while safeguarding interests. +- **:material-account-voice: Transparent communication:** Researchers can openly communicate their intentions and progress regarding sharing and publication, fostering trust and collaboration within the research community. + +Scientists might also want to keep in mind the following when addressing communication: + +- **:material-email-off: Cultural shift**: Resistance to open communication within traditional academic cultures can hinder progress. Taking initiative and demonstrating the advantages of scientific communication outside of the traditional methods (mail, journals) can really help with showing eagerness and devotion to an idea or project +- **:octicons-accessibility-inset-16: Accessibility and inclusivity**: Ensuring that open communication is accessible to all, regardless of language, disability, or geographical location, is vital. Efforts to provide translations, accessible formats, and international collaboration can promote inclusivity. +- **:fontawesome-solid-users-viewfinder: Quality control**: Maintaining peer review and quality control in open access publications is essential. 
Initiatives like open peer review and establishing reputable open access journals can address this challenge. +- **:material-lock-off: Advocacy and policy**: Advocacy for open science policies at institutional and governmental levels can help overcome systemic barriers. Engaging with policymakers and advocating for open science initiatives is crucial. +--- + +## Hands-on: Building a GitHub Pages Website using MkDocs + +This section is built in order to educate on and simplify the steps necessary that newcomers need to take in order to build a successful GitHub Pages hosted website. + + This tutorial is inspired by [academicpages](https://academicpages.github.io/), a Jekyll themed template created in order to help scientists and academics build their own websites. + +The easy way would be to fork/import the [foss-reference-hub website](https://cyverse-learning-materials.github.io/foss-reference-hub/) ([repository](https://github.com/CyVerse-learning-materials/foss-reference-hub)) and modify it to reflect your requirements; this tutorial will cover the necessary files and repository structure you require in order to build a successful personal website. + +!!! info "Repository Explanation" + + A GitHub hosted website running the [MkDocs-material](https://squidfunk.github.io/mkdocs-material/getting-started/) theme requires the following files in order to function: + + - A `docs` folder: + - A folder that contains all the documents necessary to populate the website's pages. + - **All of the documents that the user needs to change are in here**. + - A `mkdocs.yml` file: + - A `yml` file which contains critical information on the website structure, including themes, fonts, and extensions. + - A `requirements.txt` file: + - A file with a list of software necessary to build the website, primilily used by GitHub Actions. + - A `.github/workflow` folder: + - Contains the `ghpages.yml` file that controls the GitHub Action. 
+ + The structure of the basic repository is the following: + + ``` + . + ├── README.md + ├── mkdocs.yml <- Governing file for website building + ├── requirements.txt <- Requirements file for pip installation (required by website) + ├── docs + │ ├── assets <- Folder for images and additional graphic assets + │ └── index.md <- Main website home page + └── .github + └── workflows + └── ghpages.yml <- GitHub Actions controlling file + ``` + + Upon pushing changes, a `gh-pages` branch will be automatically created by the GitHub Action; it is where the website is rendered from. + + +### Directions A: forking an existing repo + +!!! warning "Prerequisites" + You will require the following in case you want to add code locally. + + ??? Info "1. Create a GitHub account" + Navigate to the [GitHub website](https://github.com/) and click *Sign Up*, and follow the on screen instructions. + ??? Info "2. Generate a Token" + You can follow the official documentation on how to generate Tokens [here](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens). We discussed how to generate tokens in [Week 0](https://foss.cyverse.org/00_basics/#adding-code-locally). Here's are quick steps you can follow in order to setup your account on your machine using tokens: + + 1. On your coumputer: + 1. Clone your repository (`git clone `) + 2. Make changes where necessary, and **add** (`git add `), **commit** (`git commit -m ""`) and **push** your changes (`git push origin`). + 3. You should be prompted to logging in your GitHub account. Put your email **but not your password**. Instead, open your web browser and follow the steps below: + 2. On GitHub: + 1. Navigate to your GitHub Settings (You can access your account Settings from the dropdown menu where your account icon is, on the top right of the screen) + 2. Scroll to the bottom of the left hand side menu to find *Developer settings* and open it. + 3. 
Click *Personal access tokens* > *Tokens (classic)* + 4. Click *Generate new token* > *Generate new token (classic)*. You might need to input your Authentification code if you have enabled 2FA. + 5. Give it a name, and all the scopes you require (tip: select all scopes and *No Expiration*), then click *Generate Token*. **Copy the new generated Token** + 3. Back on your computer: + 1. If you have been following the steps above, you should still be in your shell with GitHub still asking for your password. + 2. **Paste** your Token here, and you should be logging in. Your changes should then be saved to GitHub. + +1. [Fork](https://docs.github.com/en/get-started/quickstart/fork-a-repo) or [import](https://docs.github.com/en/get-started/importing-your-projects-to-github/importing-source-code-to-github/importing-a-repository-with-github-importer) the [FOSS Reference Hub website tutorial repository branch](https://github.com/CyVerse-learning-materials/foss-reference-hub/tree/foss-tut) + - Forking or importing will allow you to have your own copy of a specific repository; Cloning a repository **without** forking/importing it first, will lead to changes not being applied to your own copy of the repo, but to the original repository. You should clone your forked or imported repository, not the original! +2. Navigate to *Settings* > *Actions* > *General*: + - Under *Action Permissions* select *Allow all actions and reusalbe workflows* + - Under *Workflow permissions* select *Read and write permissions* and *Allow GitHub Actions to create and approve pull requests* +3. 
Edit the `mkdocs.yml` and push your changes + - The first changes you should be making are in the first few lines in the `mkdocs.yml` file in order to reflect your necessities: + - Line 1: `site_name:` change to any title you want for your website + - Line 2: `site_description:` give a short description of the website + - Line 3: `site_author: ` who you are + - Line 4: `site_url:` change it to the URL reflected in *Settings*, which will most likely be `https:///` + - Line 7: `repo_name: ` give the name of your repository (e.g., `academicpages-mkdocs` in this case) + - Line 8: `repo_url:` give the git repository URL + - Line 11: `copyright:` change `your name` to the maintainer of the website (likely to be you) + !!! warning "Workflow expectations" + The previos changes *should* trigger the GitHub action workflow, which is setup to apply changes to the website every time a commit is pushed. One of the first thing that `mkdocs-material` will do, is to create the `gh-pages` branch (in case you do not have it already). **The workflow will fail because the `ghpages.yml` in the `.github/workflows` directory is disabled (["commented out"](https://en.wiktionary.org/wiki/comment_out))**. To enable it, remove the `#` at the beginnig on each line and commit your changes. Upon changes, the workflow should go ahead and create the `gh-pages` branch. +4. Navigate to *Settings* > *Pages* and make sure that *Source* is *Deploy from a branch* and Branch is *gh-pages*, */(root)* + - You should be able to access your website at `https://.github.io/`. If you cannot find your website, go to the repository's settings page and navigate to *Pages*: your website address will be there. +5. Edit documents as necessary. + - Don't forget to **add**, **commit** and **push** changes! + - Changes will only be visible on the website after a successful push. 
+ - After each push, next to the commit identifier GitHub will show either a yellow circle (:yellow_circle:, meaning building), green check (:material-check:, meaning success), or red cross (:x:, meaning failure). + ??? Tip "Failure? Try again!" + If you've been given the red cross :x:, GitHub will notify you with what went wrong. By clicking on the :x:, GitHub will open up a new page showing you the broken process. + +### Directions B: Creating your own + +!!! warning "Prerequisites" + You will require the following in case you want to add code locally. However, you can do all of these changes directly on GitHub. If you do want to carry out changes locally, you'll need the a GitHub account and a Token. + + ??? Info "1. Create a GitHub account" + Navigate to the [GitHub website](https://github.com/) and click *Sign Up*, and follow the on screen instructions. + ??? Info "2. Generate a Token" + You can follow the official documentation on how to generate Tokens [here](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens). We discussed how to generate tokens in [Week 0](https://foss.cyverse.org/00_basics/#adding-code-locally). Here's are quick steps you can follow in order to setup your account on your machine using tokens: + + 1. On your coumputer: + 1. Clone your repository (`git clone `) + 2. Make changes where necessary, and **add** (`git add `), **commit** (`git commit -m ""`) and **push** your changes (`git push origin`). + 3. You should be prompted to logging in your GitHub account. Put your email **but not your password**. Instead, open your web browser and follow the steps below: + 2. On GitHub: + 1. Navigate to your GitHub Settings (You can access your account Settings from the dropdown menu where your account icon is, on the top right of the screen) + 2. Scroll to the bottom of the left hand side menu to find *Developer settings* and open it. + 3. 
Click *Personal access tokens* > *Tokens (classic)* + 4. Click *Generate new token* > *Generate new token (classic)*. You might need to input your Authentification code if you have enabled 2FA. + 5. Give it a name, and all the scopes you require (tip: select all scopes and *No Expiration*), then click *Generate Token*. **Copy the new generated Token** + 3. Back on your computer: + 1. If you have been following the steps above, you should still be in your shell with GitHub still asking for your password. + 2. **Paste** your Token here, and you should be logging in. Your changes should then be saved to GitHub. + +1. Create your own repository + - Add a README and a license and keep the repository public +2. Create a `docs` folder + - Within the folder, create an `index.md` file +3. Navigate to *Settings* > *Actions* > *General*: + - Under *Action Permissions* select *Allow all actions and reusalbe workflows* + - Under *Workflow permissions* select *Read and write permissions* and *Allow GitHub Actions to create and approve pull requests* +4. Create an `requirements.txt` file and populate it with the following requirement list: + + ??? abstract "Expand for code!" + ``` + bump2version + coverage + flake8 + grip + ipykernel + livereload + nbconvert>=7 + pip + sphinx + tox + twine + watchdog + wheel + mkdocs-git-revision-date-plugin + mkdocs-jupyter + mkdocs-material + mkdocs-pdf-export-plugin + mkdocstrings + mkdocstrings-crystal + mkdocstrings-python-legacy + #pygments>=2.10,<2.12 + #pymdown-extensions<9.4 + + # Requirements for core + jinja2>=3.0.2 + markdown>=3.2 + mkdocs>=1.4.0 + mkdocs-material-extensions>=1.0.3 + pygments>=2.12 + pymdown-extensions>=9.4 + + # Requirements for plugins + requests>=2.26 + ``` + +5. Create an `mkdocs.yml` file and populate it with the following: + + ??? abstract "Expand for code!" 
+ ``` + site_name: Name of your website + site_description: Tell people what this website is about + site_author: Who you are + site_url: The website URL + + # Repository + repo_name: The repository name + repo_url: The repository URL + edit_uri: edit/main/docs/ + # Copyright + copyright: 'Copyright © 2023 - 2024' + + + # Configuration + theme: + name: material + highlightjs: true + font: + text: Roboto + code: Regular + palette: + scheme: default + + # Features + features: + - navigation.instant + - navigation.tracking + - navigation.tabs + - navigation.tabs.sticky + - navigation.indexes + - navigation.top + - toc.follow + + # 404 page + static_templates: + - 404.html + + # Search feature + include_search_page: false + search_index_only: true + + # Palette and theme (uses personalized colours) + language: en + palette: + primary: custom + accent: custom + icon: + logo: material/cogs + favicon: material/cogs + + # Page tree + nav: + - Home: index.md + + # Extra Plugins + plugins: + - search + - mkdocstrings + - git-revision-date + - mkdocs-jupyter: + include_source: True + ignore_h1_titles: True + + # Extensions (leave as is) + markdown_extensions: + - admonition + - abbr + - attr_list + - def_list + - footnotes + - meta + - md_in_html + - toc: + permalink: true + title: On this page + - pymdownx.arithmatex: + generic: true + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.critic + - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:materialx.emoji.twemoji + emoji_generator: !!python/name:materialx.emoji.to_svg + - pymdownx.highlight + - pymdownx.inlinehilite + - pymdownx.keys + - pymdownx.magiclink: + repo_url_shorthand: true + user: squidfunk + repo: mkdocs-material + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + - pymdownx.tabbed + - pymdownx.tasklist: + custom_checkbox: true + - 
pymdownx.tilde + ``` + +6. Create a `.github/workflows` folder and add a `ghpages.yml` with the following: + + ??? abstract "Expand for code!" + ``` + name: Publish docs via GitHub + on: + push: + branches: + - main + + jobs: + build: + name: Deploy docs + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: run requirements file + run: pip install -r requirements.txt + - name: Deploy docs + run: mkdocs gh-deploy --force + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + ``` + +7. Navigate to *Settings* > *Pages* and make sure that *Source* is *Deploy from a branch* and Branch is *gh-pages*, */(root)* + - You should be able to access your website at `https://.github.io/`. If you cannot find your website, go to the repository's settings page and navigate to *Pages*: your website address will be there. +8. Edit documents as necessary. + - Don't forget to **add**, **commit** and **push** changes! + - Changes will only be visible on the website after a successful push. + - After each push, next to the commit identifier GitHub will show either a yellow circle (:yellow_circle:, meaning building), green check (:material-check:, meaning success), or red cross (:x:, meaning failure). + +--- + +### Further Documentation + +Here are some guides that you may find useful: + +- [MarkDown cheatsheet](https://www.markdownguide.org/cheat-sheet/): for correct MarkDown synthax. +- [MkDocs-material](https://squidfunk.github.io/mkdocs-material/getting-started/): a starting guide to MkDocs Material theme ([massive list of supported emojis here](https://squidfunk.github.io/mkdocs-material/reference/icons-emojis/#search)). +- [MkDocs-material References](https://squidfunk.github.io/mkdocs-material/reference/): more sophisticated documentation for MkDocs Material. 
+- [YouTube link to FOSS 2022](https://www.youtube.com/watch?v=UQZseJjR_OI&t=3750s&ab_channel=CyVerse.org): Michael explains (~1h mark) his Jekyll-based website and gives a tutorial on how to use [academicpages](https://academicpages.github.io/). + +--- + +## Self-Paced Material + +- [15 Data Science Communities to Join](https://towardsdatascience.com/15-data-science-slack-communities-to-join-8fac301bd6ce){target=_blank} +- [Python & Slack](https://towardsdatascience.com/python-and-slack-a-natural-match-60b136883d4d){target=_blank} +- [Slack CLI notifications](https://samapriya.github.io/projects/slack_notifier_cli_addon/){target=_blank} +- [Meetups](https://www.meetup.com/){target=_blank} + +### GitHub Pages Website Quickstarts + +- [:simple-github: *GitHub Pages*](https://pages.github.com/) + 1. Create a GitHub account + 2. Clone the repo `https://github.com/username/username.github.io` + 3. Create an `index.html` + 4. Push it back to GitHub + +- [:simple-readthedocs: *ReadTheDocs.org*](https://readthedocs.org/) + 1. [Install](https://docs.readthedocs.io/en/stable/install.html) + 2. [Use Github](https://github.com/rtfd/readthedocs.org) + 3. [Create a ReadTheDocs account](https://readthedocs.org/accounts/signup/) + +- [:simple-markdown: *Material MkDocs*](https://squidfunk.github.io/mkdocs-material/getting-started/) + 1. [Install Material](https://squidfunk.github.io/mkdocs-material/getting-started/#installation) + 1. use a [`reqirements.txt`](https://github.com/CyVerse-learning-materials/foss/blob/mkdocs/requirements.txt) + 2. or `pip install mkdocs-material` + 2. Clone a repository with an existing template or create a new repo with `mkdocs new .` + 3. Run `python -m mkdocs serve` to build and serve locally + 4. Open your browser to preview the build at https://localhost:8000` + +- [:material-book-arrow-down: *Bookdown*](https://bookdown.org/) + 1. [Install R and RStudio](https://www.rstudio.com/products/rstudio/download/) + 2. 
Install Bookdown package with `install.packages("bookdown", dependencies=TRUE)` + 3. Open the Bookdown demo and get started + +- [:simple-r: *Quarto*](https://quarto.org/) + - [Follow these instructions](https://quarto.org/docs/publishing/github-pages.html) + +- [:simple-jupyter: *JupyterBook*](https://jupyterbook.org/en/stable/intro.html) + - [Create your first book](https://jupyterbook.org/en/stable/start/your-first-book.html) + +- [:simple-git: *GitBook*](https://docs.gitbook.com/) + - [Follow Template builder](https://app.gitbook.com/join) + + +--- + +## Self Assessment + +??? Question "True or False: Tutorials and How-to-Guides are the same" + + !!! Success "False" + + Tutorials are in general introductory and longer than How-to-Guides and are intended for teaching learners a new concept by describing applications and providing justifications. + + How-to-Guides are more like cooking recipes which include step-by-step instructions for a specific task. + +??? Question "True or False: Teams should communicate over a single messaging platform." + + !!! Success "False" + + While it may be advisable to push informal communication toward a platform like SLACK or Microsoft Teams, there is no one-platform-fits-all solution for managing a diverse science team. + +??? Question "What is the best communication platform for team science?" + + !!! Info "There is no best platform, but there are some best practices" + + In general, communications amongst team members may be best suited for messaging services like SLACK, Teams, or Chat. + + For software development, GitHub Issues are one of the primary means of documenting changes and interactions on the web. + + Formal communication over email is preferred, and is necessary for legal, budgetary, and institutional interactions. 
diff --git a/04_documentation_communication/index.html b/04_documentation_communication/index.html new file mode 100644 index 000000000..fae2beaaf --- /dev/null +++ b/04_documentation_communication/index.html @@ -0,0 +1,1946 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 4. Documentation and Communication - CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Documentation & Communication

+
+

Learning Objectives

+

After this lesson, you should be able to:

+
    +
  • Identify and explain different types of project documentation (both internal and external)
  • +
  • Describe tools and approaches to creating your own documentation
  • +
  • Describe best practices for maintaining documentation
  • +
  • Identify and explain different communication strategies for working in a team (virtual and in person)
  • +
  • Create your own GitHub Pages website (!)
  • +
+
+

Peer-reviewed manuscripts or conference proceedings / presentations / posters are one of the primary ways of communicating science, but they are far from the only avenues of communication available to us as researchers and educators.

+

As our methods become more complicated and customized, open science means giving people a better understanding of our approaches and tools than may be required in most journals.

+

Communicating amongst a team of researchers that may span institutions, time zones, or continents also requires more modern approaches.

+

Strong frameworks for internal communication and documentation can make collaboration easier, improve the quality of your science, and reduce the hassle of collaborative work.

+

Project Documentation

+

Documentation is not only the practice of recording, preserving, and organizing information, data, or details in a structured and systematic manner. Documentation is essentially the ability to communicate specific ideas and information to your future self, your collaborators, or the world. Effective documentation must take into consideration the following points:

+
    +
  • Clarity: Documentation should be easy to understand with clear language and no ambiguity.
  • +
  • Completeness: It must cover all essential details, leaving nothing crucial undocumented.
  • +
  • Accuracy: Information should be up-to-date and correct to prevent errors and misunderstandings.
  • +
  • Organization: A logical structure and clear organization make it easy to navigate and find information.
  • +
  • Relevance: Documentation should focus on what's pertinent to its intended audience or purpose, avoiding unnecessary information.
  • +
+
+

We've already covered many best practices regarding project and data management, topics which are very much intertwined with documentation. Here are some things to keep in mind when addressing documentation:

+
    +
  • Metadata: Implement standardized metadata formats for research outputs to enhance findability.
  • +
  • Data Repositories: Deposit research data and materials in trusted data repositories that adhere to FAIR principles, making them easily accessible and reusable.
  • +
  • Persistent Identifiers: Assign persistent identifiers (e.g., DOIs, ORCIDs) to datasets, publications, and researchers, ensuring their long-term accessibility and recognition.
  • +
  • Templates: Create and utilize documentation templates for research datasets, methods, and software to ensure uniform and comprehensive information capture. It saves time!!
  • +
+
+

Not all documentation is the same. The documentation system, by Divio, categorizes the different types of documentation into 4 quadrants:

+
+

xkcd +

Read more in depth on the documentation system here: https://documentation.divio.com

+
+
+Explaining the quadrants +
    +
  • Tutorials: Lessons! Tutorials are lessons that take the reader by the hand through a series of steps to complete a project of some kind. They are what your project needs in order to show a beginner that they can achieve something with it.
  • +
  • How-to-guides: Recipes! How-to-guides take the reader through the steps required to solve a real-world problem.
  • +
  • References: Guides! References offer technical descriptions of the machinery and how to operate it. References have one job only: to describe. They are code-determined, because ultimately that’s what they describe: key classes, functions, APIs, and so they should list things like functions, fields, attributes and methods, and set out how to use them.
  • +
  • Explanation: Discussions! The aims of explanations are to clarify and illuminate a particular topic by broadening the documentation’s coverage of a topic.
  • +
+
+
+

Furthermore, one of the main issues with documentation usually becomes keeping documents up-to-date. Here are some tips one can keep in mind to address the issue:

+
    +
  • Data Lifecycle Planning: Develop a clear data management and documentation plan at the outset of the research project to ensure consistency and continuity.
  • +
  • Version Control: Use version control systems like Git for code and documentation to track changes, facilitate collaboration, and maintain a history of updates.
  • +
  • Workflows and Automation: Explore automation tools for documentation, such as generating metadata from data headers or embedding documentation within code using tools like GitHub pages, Sphinx and LaTeX.
  • +
  • Collaborative Documentation Platforms: Employ collaborative platforms like Overleaf or Google Docs to enable multiple researchers to contribute to and update documentation.
  • +
  • Documentation Reviews: Schedule regular documentation reviews within research teams to identify gaps or outdated information and address them promptly.
  • +
+
+

Public Repositories for Documentation

+

This website is rendered using GitHub Pages using MkDocs and the Material theme for MkDocs.

+

Other popular website generators for GitHub Pages are Jekyll Theme or Bootstrap.js.

+

ReadTheDocs.org has become a popular tool for developing web-based documentation. Think of RTD as "Continuous Documentation".

+

Bookdown is an open-source R package that facilitates writing books and long-form articles/reports with R Markdown.

+

Quarto is an open-source scientific and technical publishing system built on Pandoc

+

Confluence Wikis (CyVerse) are another tool +for documenting your workflow.

+
+

Things to remember about Documentation

+
    +
  • +

    Documentation should be written in such a way that people who did not write the documentation can read and then use or read and then teach others in the applications of the material.

    +
  • +
  • +

    Documentation is best treated as a living document, but version control is necessary to maintain it

    +
  • +
  • +

    Technology changes over time, expect to refresh documentation every 3-5 years as your projects age and progress.

    +
  • +
+
+

GitHub Pages

+
    +
  • You can pull templates from other GitHub users for your website, + e.g. Jekyll themes
  • +
  • GitHub pages are free, fast, and easy to build, but limited in use + of subdomain or URLs.
  • +
+

ReadTheDocs

+
    +
  • publishing websites via + ReadTheDocs.com costs money.
  • +
  • You can work in an offline state, where you develop the materials + and publish them to your localhost using + Sphinx
  • +
  • You can work on a website template in a GitHub repository, and + pushes are updated in near real time using ReadTheDocs.com.
  • +
+

Material MkDocs

+
    +
  • publish via GitHub Actions
  • +
  • Uses open source Material or ReadTheDocs Themes
  • +
+

Bookdown

+ +

Quarto

+ +

JupyterBook

+ +

GitBook

+
    +
  • GitBook websites use MarkDown syntax
  • +
  • Free for open source projects, paid plans are available
  • +
+

Websites to Host Methods & Protocols

+

Open Science Framework for free. OSF can be directly linked to your ORCID.

+
    +
  • Integrated project management tools
  • +
  • Uses templates to create a project website
  • +
  • Can publish preprints from within project management tools
  • +
+

Protocols.io - collaborative platform and preprint server for: science methods, computational workflows, clinical trials, operational procedures, safety checklists, and instructions / manuals.

+

QUBES - community of math and biology educators who share resources and methods for preparing students to tackle real, complex, biological problems.

+
+What are the benefits of using a GitHub.io website? +

Github Pages are hosted directly from your GitHub repository.

+

Just edit, push, and your changes are live.

+

You do not need to run your own web server!!

+
+
+

Communication

+

Internal Project

+

Choosing which software to use for your internal lab communication can be complicated by the cost of setting up, the cost of maintaining, and simply by the sheer number of platforms that are out there.

+

For this workshop, we use SLACK (Searchable Log of All Conversation & Knowledge). Microsoft's competitor to SLACK is Microsoft Teams.

+

Remember, the intention of these platforms is to improve productivity & not become a distraction.

+

SLACK

+
    +
  • Slack has plenty of apps for coordinating + multiple services, i.e. Calendars, Github, GoogleDrive, Box, etc.
  • +
  • Free Slack is limiting (e.g., 90 day history; limited connections across workspaces).
  • +
  • Paid Slack is $7.25 per user per month. (10 users for 1 year = $870)
  • +
+

Microsoft Teams

+
    +
  • Teams is used by many R1 research universities as part of their + campus wide license agreement for Office 365 Business and Education
  • +
  • For example, anyone with a arizona.edu email address can use Teams for free
  • +
  • Limitations:
      +
    • Not sure you can create your own Teams
    • +
    • Limited to messaging with people in your university Team
    • +
    +
  • +
+

Other popular alternatives

+ +
+

Useful links for creating a SLACK workspace

+
1.  [Create a new Workspace](https://get.slack.help/hc/en-us/articles/206845317-Create-a-Slack-workspace){target=_blank}
+2.  [Create channels, add apps & tools](https://get.slack.help/hc/en-us/articles/217626298-tips-for-team-creators-and-admins){target=_blank}
+
+
+
+

External (Public)

+

Communicating with the public and other members of your science community (in addition to traditional peer-review publications and conferences) is one of the most important parts of your science!

+

There are many ways scientists use social media and the web to share their data science ideas:

+
    +
  1. "Science Twitter" - + is really just regular Twitter, but with a + focus on following other scientists and organizations, and tweeting + about research you're interested in. By building up a significant + following, more people will know you, know about your work, and + you'll have a higher likelihood of meeting other new collaborators.
  2. +
  3. Blogging Platforms such as Medium are a great place to self publish your writing on just about any topic. + It's free to sign up and start blogging, but does have a fee for accessing premium content. Some of my favorite blogs include Toward Data Science + and Chris Holmes.
  4. +
  5. Community groups - There are lists (and lists of lists) of + nationals research organizations, + in which a researcher can become involved. These older organziations + still rely on official websites, science journal blogs, and email + lists to communicate with their members. In the earth sciences there + are open groups which focus on communication like the Earth Science + Information Partners (ESIP) with + progressive ideas about how data and science can be done. Other + groups, like The Carpentries and + Research Bazaar are + focused on data science training and digital literacy.
  6. +
  7. Podcasts - Creating and distributing audio content to the masses is easier than ever before. There are many podcast hosting platforms including Spotify, Podbean, Acast, and Libsyn. From there it is simple to make your podcast available in the Google Podcast app or Apple Podcast app.
  8. +
  9. Webinars - With platforms such as Zoom, Microsoft Teams, and Google Meet, it is so easy nowadays to host a webinar touting and explaining your science.
  10. +
  11. Youtube - The king of video sharing platforms is a great place to post content promoting your science (and yourself!). For example, Cyverse posts lots of content on cyberinfrastructure and data processing pipelines. Some of my favorite podcasts hosted on Youtube include StarTalk and Lex Fridman.
  12. +
+
+

Important

+
**Remember: Personal and Professional Accounts are Not Isolated**
+
+You decide what you post on the internet. Your scientist identity may be
+a part of your personal identity on social media, it might be separate.
+A future employer or current employer can see your old posts. What you
+post in your personal accounts can be considered a reflection of the
+organization you work for and may be used in decisions about hiring or
+dismissal.
+
+
+

Addressing Effective Communication

+

Whether internal or external, communication is important because it serves as the foundation for the exchange of information, ideas, and knowledge, enabling collaboration, understanding, and the advancement of individuals, organizations, and societies. It is therefore fundamental to be able to communicate efficiently, whether it is to promote a piece of scientific advancement or to reach out and ask for help.

+

The act of balancing transparency, openness, ethicality and respect for personal data and intellectual property in communication has always been a challenge. A few methods to address these issues are the following:

+
    +
  • Utilize licensing options: Researchers can use open licensing mechanisms such as Creative Commons licenses to specify the terms under which their work can be shared, modified, and reused, balancing openness with protection.
  • +
  • Data anonymization: To address privacy and ethical concerns, researchers can anonymize sensitive data before sharing it, allowing for openness without compromising privacy.
  • +
  • Collaborative agreements: Collaborative research agreements and partnerships can define expectations regarding data sharing, authorship, and intellectual property, ensuring transparency while safeguarding interests.
  • +
  • Transparent communication: Researchers can openly communicate their intentions and progress regarding sharing and publication, fostering trust and collaboration within the research community.
  • +
+

Scientists might also want to keep in mind the following when addressing communication:

+
    +
  • Cultural shift: Resistance to open communication within traditional academic cultures can hinder progress. Taking initiative and demonstrating the advantages of scientific communication outside of the traditional methods (mail, journals) can really help with showing eagerness and devotion to an idea or project.
  • +
  • Accessibility and inclusivity: Ensuring that open communication is accessible to all, regardless of language, disability, or geographical location, is vital. Efforts to provide translations, accessible formats, and international collaboration can promote inclusivity.
  • +
  • Quality control: Maintaining peer review and quality control in open access publications is essential. Initiatives like open peer review and establishing reputable open access journals can address this challenge.
  • +
  • Advocacy and policy: Advocacy for open science policies at institutional and governmental levels can help overcome systemic barriers. Engaging with policymakers and advocating for open science initiatives is crucial.
  • +
+
+

Hands-on: Building a GitHub Pages Website using MkDocs

+

This section is built in order to educate on and simplify the steps that newcomers need to take in order to build a successful GitHub Pages hosted website.

+

This tutorial is inspired by academicpages, a Jekyll themed template created in order to help scientists and academics build their own websites.

+

The easy way would be to fork/import the foss-reference-hub website (repository) and modify it to reflect your requirements; this tutorial will cover the necessary files and repository structure you require in order to build a successful personal website.

+
+

Repository Explanation

+

A GitHub hosted website running the MkDocs-material theme requires the following files in order to function:

+
    +
  • A docs folder:
      +
    • A folder that contains all the documents necessary to populate the website's pages.
    • +
    • All of the documents that the user needs to change are in here.
    • +
    +
  • +
  • A mkdocs.yml file:
      +
    • A yml file which contains critical information on the website structure, including themes, fonts, and extensions.
    • +
    +
  • +
  • A requirements.txt file:
      +
    • A file with a list of software necessary to build the website, primarily used by GitHub Actions.
    • +
    +
  • +
  • A .github/workflow folder:
      +
    • Contains the ghpages.yml file that controls the GitHub Action.
    • +
    +
  • +
+

The structure of the basic repository is the following:

+
.
+├── README.md
+├── mkdocs.yml              <- Governing file for website building
+├── requirements.txt        <- Requirements file for pip installation (required by website)      
+├── docs                           
+│   ├── assets              <- Folder for images and additional graphic assets
+│   └── index.md            <- Main website home page
+└── .github
+    └── workflows
+        └── ghpages.yml     <- GitHub Actions controlling file
+
+

Upon pushing changes, a gh-pages branch will be automatically created by the GitHub Action; it is where the website is rendered from.

+
+

Directions A: forking an existing repo

+
+

Prerequisites

+

You will require the following in case you want to add code locally.

+
+1. Create a GitHub account +

Navigate to the GitHub website and click Sign Up, and follow the on screen instructions.

+
+
+2. Generate a Token +

You can follow the official documentation on how to generate Tokens here. We discussed how to generate tokens in Week 0. Here are quick steps you can follow in order to set up your account on your machine using tokens:

+
    +
  1. On your computer:
      +
    1. Clone your repository (git clone <repository>)
    2. +
    3. Make changes where necessary, and add (git add <changed files>), commit (git commit -m "<message on changes>") and push your changes (git push origin).
    4. +
    5. You should be prompted to log in to your GitHub account. Put your email but not your password. Instead, open your web browser and follow the steps below:
    6. +
    +
  2. +
  3. On GitHub:
      +
    1. Navigate to your GitHub Settings (You can access your account Settings from the dropdown menu where your account icon is, on the top right of the screen)
    2. +
    3. Scroll to the bottom of the left hand side menu to find Developer settings and open it.
    4. +
    5. Click Personal access tokens > Tokens (classic)
    6. +
    7. Click Generate new token > Generate new token (classic). You might need to input your Authentication code if you have enabled 2FA.
    8. +
    9. Give it a name, and all the scopes you require (tip: select all scopes and No Expiration), then click Generate Token. Copy the new generated Token
    10. +
    +
  4. +
  5. Back on your computer:
      +
    1. If you have been following the steps above, you should still be in your shell with GitHub still asking for your password.
    2. +
    3. Paste your Token here, and you should be logged in. Your changes should then be saved to GitHub.
    4. +
    +
  6. +
+
+
+
    +
  1. Fork or import the FOSS Reference Hub website tutorial repository branch
      +
    • Forking or importing will allow you to have your own copy of a specific repository; Cloning a repository without forking/importing it first, will lead to changes not being applied to your own copy of the repo, but to the original repository. You should clone your forked or imported repository, not the original!
    • +
    +
  2. +
  3. Navigate to Settings > Actions > General:
      +
    • Under Action Permissions select Allow all actions and reusable workflows
    • +
    • Under Workflow permissions select Read and write permissions and Allow GitHub Actions to create and approve pull requests
    • +
    +
  4. +
  5. Edit the mkdocs.yml and push your changes
      +
    • The first changes you should be making are in the first few lines in the mkdocs.yml file in order to reflect your necessities:
        +
      • Line 1: site_name: change to any title you want for your website
      • +
      • Line 2: site_description: give a short description of the website
      • +
      • Line 3: site_author: who you are
      • +
      • Line 4: site_url: change it to the URL reflected in Settings, which will most likely be https://<github-username.github.io>/
      • +
      • Line 7: repo_name: give the name of your repository (e.g., academicpages-mkdocs in this case)
      • +
      • Line 8: repo_url: give the git repository URL
      • +
      • Line 11: copyright: change your name to the maintainer of the website (likely to be you)
      • +
      +
    • +
    +
    +

    Workflow expectations

    +

    The previous changes should trigger the GitHub action workflow, which is set up to apply changes to the website every time a commit is pushed. One of the first things that mkdocs-material will do is to create the gh-pages branch (in case you do not have it already). The workflow will fail because the ghpages.yml in the .github/workflows directory is disabled ("commented out"). To enable it, remove the # at the beginning of each line and commit your changes. Upon changes, the workflow should go ahead and create the gh-pages branch.

    +
    +
  6. +
  7. Navigate to Settings > Pages and make sure that Source is Deploy from a branch and Branch is gh-pages, /(root)
      +
    • You should be able to access your website at https://<github-username>.github.io/. If you cannot find your website, go to the repository's settings page and navigate to Pages: your website address will be there.
    • +
    +
  8. +
  9. Edit documents as necessary.
      +
    • Don't forget to add, commit and push changes!
    • +
    • Changes will only be visible on the website after a successful push.
    • +
    • After each push, next to the commit identifier GitHub will show either a yellow circle (🟡, meaning building), green check (, meaning success), or red cross (❌, meaning failure).
    • +
    +
    +Failure? Try again! +

    If you've been given the red cross ❌, GitHub will notify you with what went wrong. By clicking on the ❌, GitHub will open up a new page showing you the broken process.

    +
    +
  10. +
+

Directions B: Creating your own

+
+

Prerequisites

+

You will require the following in case you want to add code locally. However, you can do all of these changes directly on GitHub. If you do want to carry out changes locally, you'll need a GitHub account and a Token.

+
+1. Create a GitHub account +

Navigate to the GitHub website and click Sign Up, and follow the on screen instructions.

+
+
+2. Generate a Token +

You can follow the official documentation on how to generate Tokens here. We discussed how to generate tokens in Week 0. Here are quick steps you can follow in order to set up your account on your machine using tokens:

+
    +
  1. On your computer:
      +
    1. Clone your repository (git clone <repository>)
    2. +
    3. Make changes where necessary, and add (git add <changed files>), commit (git commit -m "<message on changes>") and push your changes (git push origin).
    4. +
    5. You should be prompted to log in to your GitHub account. Put your email but not your password. Instead, open your web browser and follow the steps below:
    6. +
    +
  2. +
  3. On GitHub:
      +
    1. Navigate to your GitHub Settings (You can access your account Settings from the dropdown menu where your account icon is, on the top right of the screen)
    2. +
    3. Scroll to the bottom of the left hand side menu to find Developer settings and open it.
    4. +
    5. Click Personal access tokens > Tokens (classic)
    6. +
    7. Click Generate new token > Generate new token (classic). You might need to input your Authentication code if you have enabled 2FA.
    8. +
    9. Give it a name, and all the scopes you require (tip: select all scopes and No Expiration), then click Generate Token. Copy the new generated Token
    10. +
    +
  4. +
  5. Back on your computer:
      +
    1. If you have been following the steps above, you should still be in your shell with GitHub still asking for your password.
    2. +
    3. Paste your Token here, and you should be logged in. Your changes should then be saved to GitHub.
    4. +
    +
  6. +
+
+
+
    +
  1. Create your own repository
      +
    • Add a README and a license and keep the repository public
    • +
    +
  2. +
  3. Create a docs folder
      +
    • Within the folder, create an index.md file
    • +
    +
  4. +
  5. Navigate to Settings > Actions > General:
      +
    • Under Action Permissions select Allow all actions and reusable workflows
    • +
    • Under Workflow permissions select Read and write permissions and Allow GitHub Actions to create and approve pull requests
    • +
    +
  6. +
  7. +

    Create a requirements.txt file and populate it with the following requirement list:

    +
    +Expand for code! +
    bump2version
    +coverage
    +flake8
    +grip
    +ipykernel
    +livereload
    +nbconvert>=7
    +pip
    +sphinx
    +tox
    +twine
    +watchdog
    +wheel
    +mkdocs-git-revision-date-plugin 
    +mkdocs-jupyter 
    +mkdocs-material 
    +mkdocs-pdf-export-plugin
    +mkdocstrings 
    +mkdocstrings-crystal
    +mkdocstrings-python-legacy
    +#pygments>=2.10,<2.12
    +#pymdown-extensions<9.4
    +
    +# Requirements for core
    +jinja2>=3.0.2
    +markdown>=3.2
    +mkdocs>=1.4.0
    +mkdocs-material-extensions>=1.0.3
    +pygments>=2.12
    +pymdown-extensions>=9.4
    +
    +# Requirements for plugins
    +requests>=2.26
    +
    +
    +
  8. +
  9. +

    Create an mkdocs.yml file and populate it with the following:

    +
    +Expand for code! +
    site_name: Name of your website
    +site_description: Tell people what this website is about
    +site_author: Who you are
    +site_url: The website URL
    +
    +# Repository
    +repo_name: The repository name
    +repo_url: The repository URL
    +edit_uri: edit/main/docs/
    +# Copyright
    +copyright: 'Copyright &copy; 2023 - 2024'
    +
    +
    +# Configuration
    +theme:
    +    name: material
    +highlightjs: true
    +font:
    +    text: Roboto
    +    code: Regular
    +palette:
    +    scheme: default
    +
    +# Features  
    +features:
    +- navigation.instant
    +- navigation.tracking
    +- navigation.tabs
    +- navigation.tabs.sticky
    +- navigation.indexes
    +- navigation.top
    +- toc.follow
    +
    +# 404 page
    +static_templates:
    +    - 404.html
    +
    +# Search feature
    +include_search_page: false
    +search_index_only: true
    +
    +# Palette and theme (uses personalized colours)
    +language: en
    +palette:
    +    primary: custom
    +    accent: custom
    +icon:
    +    logo: material/cogs
    +    favicon: material/cogs
    +
    +# Page tree
    +nav:
    +- Home: index.md
    +
    +# Extra Plugins
    +plugins:
    +    - search
    +    - mkdocstrings
    +    - git-revision-date
    +    - mkdocs-jupyter:
    +        include_source: True
    +        ignore_h1_titles: True
    +
    +# Extensions (leave as is)
    +markdown_extensions:
    +- admonition
    +- abbr
    +- attr_list
    +- def_list
    +- footnotes
    +- meta
    +- md_in_html
    +- toc:
    +    permalink: true
    +    title: On this page
    +- pymdownx.arithmatex:
    +    generic: true
    +- pymdownx.betterem:
    +    smart_enable: all
    +- pymdownx.caret
    +- pymdownx.critic
    +- pymdownx.details
    +- pymdownx.emoji:
    +    emoji_index: !!python/name:materialx.emoji.twemoji
    +    emoji_generator: !!python/name:materialx.emoji.to_svg
    +- pymdownx.highlight
    +- pymdownx.inlinehilite
    +- pymdownx.keys
    +- pymdownx.magiclink:
    +    repo_url_shorthand: true
    +    user: squidfunk
    +    repo: mkdocs-material
    +- pymdownx.mark
    +- pymdownx.smartsymbols
    +- pymdownx.superfences:
    +    custom_fences:
    +        - name: mermaid
    +        class: mermaid
    +        format: !!python/name:pymdownx.superfences.fence_code_format
    +- pymdownx.tabbed
    +- pymdownx.tasklist:
    +    custom_checkbox: true
    +- pymdownx.tilde
    +
    +
    +
  10. +
  11. +

    Create a .github/workflows folder and add a ghpages.yml with the following:

    +
    +Expand for code! +
    name: Publish docs via GitHub
    +on:
    +push:
    +    branches:
    +    - main
    +
    +jobs:
    +build:
    +    name: Deploy docs
    +    runs-on: ubuntu-latest
    +    steps:
    +    - uses: actions/checkout@v3
    +    - uses: actions/setup-python@v4
    +        with:
    +        python-version: 3.9
    +    - name: run requirements file
    +        run:  pip install -r requirements.txt 
    +    - name: Deploy docs
    +        run: mkdocs gh-deploy --force
    +        env:
    +        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    +
    +
    +
  12. +
  13. +

    Navigate to Settings > Pages and make sure that Source is Deploy from a branch and Branch is gh-pages, /(root)

    +
      +
    • You should be able to access your website at https://<github-username>.github.io/. If you cannot find your website, go to the repository's settings page and navigate to Pages: your website address will be there.
    • +
    +
  14. +
  15. Edit documents as necessary.
      +
    • Don't forget to add, commit and push changes!
    • +
    • Changes will only be visible on the website after a successful push.
    • +
    • After each push, next to the commit identifier GitHub will show either a yellow circle (🟡, meaning building), green check (✅, meaning success), or red cross (❌, meaning failure).
    • +
    +
  16. +
+
+

Further Documentation

+

Here are some guides that you may find useful:

+ +
+

Self-Paced Material

+ +

GitHub Pages Website Quickstarts

+ +
+

Self Assessment

+
+True or False: Tutorials and How-to-Guides are the same +
+

False

+

Tutorials are in general introductory and longer than How-to-Guides and are intended for teaching learners a new concept by describing applications and providing justifications.

+

How-to-Guides are more like cooking recipes which include step-by-step instructions for a specific task.

+
+
+
+True or False: Teams should communicate over a single messaging platform. +
+

False

+

While it may be advisable to push informal communication toward a platform like Slack or Microsoft Teams, there is no one-platform-fits-all solution for managing a diverse science team.

+
+
+
+What is the best communication platform for team science? +
+

There is no best platform, but there are some best practices

+

In general, communications amongst team members may be best suited for messaging services like Slack, Teams, or Chat.

+

For software development, GitHub Issues are one of the primary means of documenting changes and interactions on the web.

+

Formal communication over email is preferred, and is necessary for legal, budgetary, and institutional interactions.

+
+
+ +
+
+ + + Last update: + 2023-10-18 + + +
+ + + + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/05_version_control/05_version_control.md b/05_version_control/05_version_control.md new file mode 100644 index 000000000..5ea180d2d --- /dev/null +++ b/05_version_control/05_version_control.md @@ -0,0 +1,299 @@ +# Version Control + + +!!! Success "Learning Objectives" + + After this lesson, you should be able to: + + * Understand the basics of `git` as a resource for reproducible programming + * Describe tools and approaches to creating your `git` Repositories + * Describe best practices for maintaining GitHub Organizations and Repositories + * Maintain own GitHub user profile and repositories + +Version control refers to keeping track of the version of a file, set of +files, or a whole project. + +Some version control tools: + +- :material-microsoft-office: Microsoft Office's [*Track Changes*](https://support.microsoft.com/en-au/office/track-changes-in-word-197ba630-0f5f-4a8e-9a77-3712475e806a) functionality +- :simple-apple: Apple's [*Time Machine*](https://support.apple.com/en-us/HT201250) +- :simple-googledocs: Google Docs' [*Version History*](https://support.google.com/docs/answer/190843?hl=en&co=GENIE.Platform%3DDesktop) +- :simple-git: [Git](https://git-scm.com/) + +Version control is as much a philosophy as a set of tools; you don't +need to master Git to utilize version control (though it is certainly a +worthwhile tool for many researchers). + + +
+ ![git_def](https://swcarpentry.github.io/git-novice/fig/phd101212s.png) +
We have all been here, taken by the [Software Carpentry Version Control lesson](https://swcarpentry.github.io/git-novice/01-basics.html).
+
+ +--- + +## :simple-git: Git vs. :simple-github: GitHub + +**Git** is a command-line program for version control of repositories. +It keeps track of changes you make to files in your repository and +stores those changes in a *.git* folder in that repository. +These changes happen whenever you make a **commit**. Git stores the +history of these commits in a "tree", so you can go back to any +previous commit. By keeping track of the **differences** between +commits, Git can be much more efficient than storing an entire copy of +each version in a document's history. + +You could utilize Git completely on its own, on your local computer, and +get a lot of benefits. You will have a history of the changes you made +to a project, allowing you to go back to any old version of your work. +However, where Git really shines is in *collaborative* work. In order to +effectively collaborate with others on a project, you need two basic +features: a way to allow people to work in parallel, and a way to host +repositories somewhere where everyone can access them. The first feature +is **branching**, which is part of Git, and the hosting part can be +taken care of by platforms like GitHub, GitLab, or Bitbucket. We will +focus on GitHub. + +GitHub is a site that can remotely host your Git repositories. By +putting your repository onto GitHub, you get a backup of the repository, +a way to collaborate with others, and a lot of other features. + +
+ ![git_v_github](https://devmountain.com/wp-content/uploads/2022/01/Gitvs_Github-1a-1.jpg) +
Git vs GitHub, simplified
+
+ +### Definitions + +!!! Info "Git-related Definitions" + + **Platforms**: + + - **Git**: tool for version control. + - **GitHub**: hosted server that is also interactive. + + **Locations and directions**: + + - **repo**: short for repository + - **local**: on your personal computer. + - **remote**: somewhere other than your computer. GitHub can host remote + repositories. + - **upstream**: primary or main branch of original repository. + - **downstream**: branch or fork of repository. + + **Actions**: + + - **clone**: copy of a repository that lives locally on your computer. + Pushing changes will affect the repository online. + - **pull**: getting latest changes to the repository on your local + computer. + - the **fetch** command does the same, however one needs to also **merge** the changes, whilst with pull, the merge action is automatic. + - **branch**: a history of changes to a repository. You can have parallel + branches with separate histories, allowing you to keep a "main" + version and development versions. + - **fork**: copy of someone else's repository stored locally on your + account. From forks, you can make pull requests to the main branch. + - **commit**: finalize a change. + - **push**: add changes back to the remote repository. + - **merge**: takes changes from a branch or fork and applies them to the + main. + + !!! tip "These are also commands when paird with `git`!" + Using the following synthax `git ` one can trigger an action. An example is `git pull`, which will pull all of the latest changes in the remote repository. + + **Funtional**: + + - **pull request**: proposed changes to/within a repository. + + - **issue**: suggestions or tasks needed for the repository. Allows you to + track decisions, bugs with the repository, etc. + +
+ ![git_def](https://n7b3p4s2.stackpathcdn.com/article/git-and-github-version-control-local-and-remote-repository/Images/Git%20And%20Github%20Version%20Control.png) +
Visualizing the commands through a workflow example
(graphic's correction: ~~marged~~ merged)
+
+ +--- + +## Practical Git Techniques + +
+ ![vc_path](https://content.cdntwrk.com/files/aHViPTg1NDMzJmNtZD1pdGVtZWRpdG9yaW1hZ2UmZmlsZW5hbWU9aXRlbWVkaXRvcmltYWdlXzYzOTkwY2I4OWU5YTUuanBnJnZlcnNpb249MDAwMCZzaWc9OWJjZTA5NDIxNzY4MWFhZjYyNmEwNWNhYmI1YTUzMWQ%253D) +
The version control path sofware takes before release
+
+ +!!! info "The basic Git life cycle" + + When using Git for your version control, the usual life cycle is the following: + + | Action| Explanation | + |---|---| + | 1. `git clone ` | Clones the target repository to your machine | + | 2. `git status` | Checks whether there are changes in the remote, original repository | + | 3. `git pull`| Pulls any change to your local repository | + | 4. `git add ` | Adds to a future commit any change | + | 5. `git commit -m ""` | Creates the commit and adds a descriptive message | + | 6. `git push` | Pushes the changes commited from local to the remote repository | + + If there are no branches or external pull requests, the *basic* Git life cycle is summarizable like this: + + ```mermaid + graph LR + A[1. git clone] --> B[2. git status] -->C([differences from origin?]):::colorclass; + C-->|yes| D[3. git pull]--> E; + C-->|no| E[4. git add]; + E-->F[5. git commit] -->G[6. git push]; + G-->B; + classDef colorclass fill:#f96 + ``` + +After learning the basics of using Git, which you can learn with the +[Software Carpentry Git Lesson](https://swcarpentry.github.io/git-novice/), there are some next +things that can be useful to learn. 
Here are a couple topics that are +worth digging into more: + +- **:octicons-log-24: Using the Git log** + - You can access using **git log** + - Will show you your commit history + - Useful for figuring out where you need to roll back to + +- **:material-keyboard-tab-reverse: Reverting** + - There are a lot of different ways to "undo" something in Git + - Some are safer, some are a bit riskier + - Depends on what stage of the commit process you're in + - **Here are some useful resources**: + - [*10 Common Git Problems and How to Fix Them*](https://www.codementor.io/@citizen428/git-tutorial-10-common-git-problems-and-how-to-fix-them-aajv0katd) + - [*"So you have a mess on your hands..."*](http://justinhileman.info/article/git-pretty/git-pretty.png) + - [*How to undo almost anything*](https://github.blog/2015-06-08-how-to-undo-almost-anything-with-git/) + +- **:octicons-git-branch-24: Branching** + - This is important to learn if you're going to be doing any sort of collaboration + - Here is a fantastic resource for learning how git branching really works: https://learngitbranching.js.org/ + - **you will probably have to deal with *merge conflicts* at some point** + - Merge conflicts happen when two branches are being merged, but they have *different* changes to the same part of a file + - Perhaps you are working on a feature branch, and you change line 61 in *file.R*, but someone else made a change to the main branch at line 61 in *file.R*. When you try to merge the feature and main branches, Git won't know which changes to line 61 in *file.R* are correct, and you will need to manually decide. 
+ - Here are some good resources: + - [Resolving merge conflics](https://docs.github.com/en/github/collaborating-with-pull-requests/addressing-merge-conflicts/)resolving-a-merge-conflict-using-the-command-line + - [git - ours & theirs, a CLI resource to help with conflicts](https://nitaym.github.io/ourstheirs/) + +- **:simple-gitignoredotio: .gitignore** + - You often want Git to completely ignore certain files + - Generated files (like HTML files from Markdown docs) + - IDE-specific files like in *.RStudio* or *.vscode* folders + - **really big files, like data or images** + - If you accidentally commit a really big file, GitHub might not let you push that commit + - If you have a huge file in Git, your repository size can get way too big + - This is a pain to solve, so use the *.gitignore* file ahead of time, but if you need to fix this, here is a great resource: + - [Removing Large Files From git Using BFG and a Local Repository](https://necromuralist.github.io/posts/removing-large-files-from-git-using-bfg-and-a-local-repository/) + +--- + +## Git, GitHub and Data + +Git and data don't always go hand in hand. GitHub allows commited files to be uploaded only if the file is of 100MB or less (with a warning being issued for files between 50MB and 100MB). Additionally, [GitHub recommends to keep repositories below the 1GB threshold](https://docs.github.com/en/repositories/working-with-files/managing-large-files/about-large-files-on-github#repository-size-limits), as this also allows for quicker cloning and sharing of the repository. If a large file has been uploaded by mistake and you wish to remove it, [you can follow these instrutctions](https://docs.github.com/en/repositories/working-with-files/managing-large-files/about-large-files-on-github#removing-files-from-a-repositorys-history). + +If you *do* have to work with large files and Git, here are some questions to ask yourself: + +- Is this data shareable? 
+- Are there alternative file hosting platforms I can use? +- How will this data impact the sharability of this repository? +- Am I using a .gitignore? + +GitHub now offers the [**Git Large File Storage (:simple-gitlfs: Git LFS)**](https://git-lfs.com/): the system works by storing references to the file in your repository, but not the file itself -- it creates a *pointer* file within the repo, and stores the file elsewhere. If you were to clone the repository, the pointer file will act as a map to show you how to obtain the original file. + +Git LFS data upload limits are based on your GitHub subscription: + +- 2 GB for GitHub free and GitHub Pro +- 4 GB for GitHub Team +- 5 GB for GitHub Enterprise Cloud + +
+ ![gitlfs](https://git-lfs.com/images/tweet-promo.png) +
A depiction of how the Git LFS pointer-repository relationship works.
+
+ +--- + +## Useful GitHub Features + +At its core, GitHub is just a place to host your Git repositories. +However, it offers a lot of functionality that has less to do with Git, +and more to do with [**Project Management**](02_project_management.md). We will +walk through a few of these useful features. + +- [**:octicons-issue-opened-16: Issues**](https://docs.github.com/en/issues) + - Issues let you plan out changes and suggestions to a repo + - Closing/reopening + - Labels + - Assigning + - Templates + - Numbering/mentioning + +- [**:material-source-pull: Pull Requests**](https://docs.github.com/en/github/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests) + - Pull requests are a way to request merging code from one branch to another + - typical workflow is for someone to fork a repo, then make a PR from that repo to another + - Reviews + - Commenting + - Merging + - Closing issues + +- [**:octicons-organization-16: Organizations**](https://docs.github.com/en/organizations) + - You can use Organizations to organize sets of repositories + - Roles + - Teams + - GitHub documentation: + +- **Other neat things** + - Permissions/collaborators + - GitHub Classroom + - Gists + - CSV and map rendering + - Code editor + +--- + +## Beyond Git and GitHub + +There are other platforms that address Version Control and have similar functionalities to GitHub: + +- **[:material-gitlab: GitLab](https://gitlab.com/)**: An alternative to GitHub, GitLab offers both a cloud-hosted platform and a self-hosted option ([GitLab CE/EE](https://about.gitlab.com/install/ce-or-ee/)). It provides a comprehensive DevOps platform with built-in CI/CD, container registry, and more. + +- **[:simple-bitbucket: Bitbucket](https://bitbucket.org/product/)**: Atlassian's Bitbucket is a Git repository hosting service that also supports Mercurial repositories. It offers integration with Jira, Confluence, and other Atlassian products. 
+ +- **[:simple-sourceforge: SourceForge](https://sourceforge.net/)**: A platform that provides Git and Subversion hosting, as well as tools for project management, issue tracking, and collaboration. + +- **[:fontawesome-brands-aws: AWS CodeCommit](https://aws.amazon.com/codecommit/)**: Part of Amazon Web Services (AWS), CodeCommit is a managed Git service that integrates seamlessly with other AWS services. + +- **[:simple-azuredevops: Azure DevOps Services (formerly VSTS)](https://azure.microsoft.com/en-us/products/devops))**: Microsoft's Azure DevOps Services offers Git repository hosting along with a wide range of DevOps tools for planning, developing, testing, and deploying software. + +- **[:simple-mercurial: Mercurial](https://www.mercurial-scm.org/)**: Like Git, Mercurial is a distributed version control system, but with a different branching and merging model. It's an alternative to Git for version control. + +--- + +## Self Assessment + +??? Question "True or False: Using `Git` requires a GitHub account" + + !!! Failure "False" + + `Git` is open source software. + + GitHub is a privately owned (Microsoft) company + + Other platforms like [GitLab](https://gitlab.com){target=_blank}, [GitBucket](https://gitbucket.github.io/){target=_blank}, and [GNU Savannah](https://savannah.gnu.org/){target=_blank} all offer `Git` as a version control system service. + +??? Question "True or False: Using `Git` is easy" + + !!! Failure "False" + + Using `Git` can be frustrating to even the most experienced users + +??? Question "When you find a new repository on GitHub that you think can help your research, what are the first things you should do?" + + !!! Success "Look at the README.md" + + Most GitHub repositories have a README.md file which explains what you're looking at. + + !!! 
Success "Look at the LICENSE" + + Not all repositories are licensed the same way - be sure to check the LICENSE file to see whether the software is open source, or if it has specific requirements for reuse. diff --git a/05_version_control/index.html b/05_version_control/index.html new file mode 100644 index 000000000..64f0c9e0e --- /dev/null +++ b/05_version_control/index.html @@ -0,0 +1,1464 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 5. Version Control - CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ + + + + + + +

Version Control

+
+

Learning Objectives

+

After this lesson, you should be able to:

+
    +
  • Understand the basics of git as a resource for reproducible programming
  • +
  • Describe tools and approaches to creating your git Repositories
  • +
  • Describe best practices for maintaining GitHub Organizations and Repositories
  • +
  • Maintain own GitHub user profile and repositories
  • +
+
+

Version control refers to keeping track of the version of a file, set of +files, or a whole project.

+

Some version control tools:

+ +

Version control is as much a philosophy as a set of tools; you don't +need to master Git to utilize version control (though it is certainly a +worthwhile tool for many researchers).

+
+

git_def +

We have all been here, taken by the Software Carpentry Version Control lesson.

+
+
+

Git vs. GitHub

+

Git is a command-line program for version control of repositories. +It keeps track of changes you make to files in your repository and +stores those changes in a .git folder in that repository. +These changes happen whenever you make a commit. Git stores the +history of these commits in a "tree", so you can go back to any +previous commit. By keeping track of the differences between +commits, Git can be much more efficient than storing an entire copy of +each version in a document's history.

+

You could utilize Git completely on its own, on your local computer, and +get a lot of benefits. You will have a history of the changes you made +to a project, allowing you to go back to any old version of your work. +However, where Git really shines is in collaborative work. In order to +effectively collaborate with others on a project, you need two basic +features: a way to allow people to work in parallel, and a way to host +repositories somewhere where everyone can access them. The first feature +is branching, which is part of Git, and the hosting part can be +taken care of by platforms like GitHub, GitLab, or Bitbucket. We will +focus on GitHub.

+

GitHub is a site that can remotely host your Git repositories. By +putting your repository onto GitHub, you get a backup of the repository, +a way to collaborate with others, and a lot of other features.

+
+

git_v_github +

Git vs GitHub, simplified

+
+

Definitions

+
+

Git-related Definitions

+

Platforms:

+
    +
  • Git: tool for version control.
  • +
  • GitHub: hosted server that is also interactive.
  • +
+

Locations and directions:

+
    +
  • repo: short for repository
  • +
  • local: on your personal computer.
  • +
  • remote: somewhere other than your computer. GitHub can host remote +repositories.
  • +
  • upstream: primary or main branch of original repository.
  • +
  • downstream: branch or fork of repository.
  • +
+

Actions:

+
    +
  • clone: copy of a repository that lives locally on your computer. +Pushing changes will affect the repository online.
  • +
  • pull: getting latest changes to the repository on your local +computer.
      +
    • the fetch command does the same, however one needs to also merge the changes, whilst with pull, the merge action is automatic.
    • +
    +
  • +
  • branch: a history of changes to a repository. You can have parallel +branches with separate histories, allowing you to keep a "main" +version and development versions.
  • +
  • fork: copy of someone else's repository stored locally on your +account. From forks, you can make pull requests to the main branch.
  • +
  • commit: finalize a change.
  • +
  • push: add changes back to the remote repository.
  • +
  • merge: takes changes from a branch or fork and applies them to the +main.
  • +
+
+

These are also commands when paired with git!

+

Using the following syntax git <command> one can trigger an action. An example is git pull, which will pull all of the latest changes in the remote repository.

+
+

Functional:

+
    +
  • +

    pull request: proposed changes to/within a repository.

    +
  • +
  • +

    issue: suggestions or tasks needed for the repository. Allows you to +track decisions, bugs with the repository, etc.

    +
  • +
+
+
+

git_def +

Visualizing the commands through a workflow example
(graphic's correction: marged merged)

+
+
+

Practical Git Techniques

+
+

vc_path +

The version control path software takes before release

+
+
+

The basic Git life cycle

+

When using Git for your version control, the usual life cycle is the following:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ActionExplanation
1. git clone <repository>Clones the target repository to your machine
2. git statusChecks whether there are changes in the remote, original repository
3. git pullPulls any change to your local repository
4. git add <changes>Adds to a future commit any change
5. git commit -m "<message>"Creates the commit and adds a descriptive message
6. git pushPushes the changes commited from local to the remote repository
+

If there are no branches or external pull requests, the basic Git life cycle is summarizable like this:

+
graph LR
+A[1. git clone] --> B[2. git status] -->C([differences from origin?]):::colorclass;
+C-->|yes| D[3. git pull]--> E;
+C-->|no| E[4. git add];
+E-->F[5. git commit] -->G[6. git push];
+G-->B;
+classDef colorclass fill:#f96
+
+

After learning the basics of using Git, which you can learn with the +Software Carpentry Git Lesson, there are some next +things that can be useful to learn. Here are a couple topics that are +worth digging into more:

+
    +
  • +

    Using the Git log

    +
      +
    • You can access using git log
    • +
    • Will show you your commit history
    • +
    • Useful for figuring out where you need to roll back to
    • +
    +
  • +
  • +

    Reverting

    + +
  • +
  • +

    Branching

    +
      +
    • This is important to learn if you're going to be doing any sort of collaboration
    • +
    • Here is a fantastic resource for learning how git branching really works: https://learngitbranching.js.org/
    • +
    • you will probably have to deal with merge conflicts at some point
        +
      • Merge conflicts happen when two branches are being merged, but they have different changes to the same part of a file
      • +
      • Perhaps you are working on a feature branch, and you change line 61 in file.R, but someone else made a change to the main branch at line 61 in file.R. When you try to merge the feature and main branches, Git won't know which changes to line 61 in file.R are correct, and you will need to manually decide.
      • +
      • Here are some good resources: +
      • +
      +
    • +
    +
  • +
  • +

    .gitignore

    +
      +
    • You often want Git to completely ignore certain files
    • +
    • Generated files (like HTML files from Markdown docs)
    • +
    • IDE-specific files like in .RStudio or .vscode folders
    • +
    • really big files, like data or images
        +
      • If you accidentally commit a really big file, GitHub might not let you push that commit
      • +
      • If you have a huge file in Git, your repository size can get way too big
      • +
      • This is a pain to solve, so use the .gitignore file ahead of time, but if you need to fix this, here is a great resource:
      • +
      • Removing Large Files From git Using BFG and a Local Repository
      • +
      +
    • +
    +
  • +
+
+

Git, GitHub and Data

+

Git and data don't always go hand in hand. GitHub allows committed files to be uploaded only if the file is of 100MB or less (with a warning being issued for files between 50MB and 100MB). Additionally, GitHub recommends to keep repositories below the 1GB threshold, as this also allows for quicker cloning and sharing of the repository. If a large file has been uploaded by mistake and you wish to remove it, you can follow these instructions.

+

If you do have to work with large files and Git, here are some questions to ask yourself:

+
    +
  • Is this data shareable?
  • +
  • Are there alternative file hosting platforms I can use?
  • +
  • How will this data impact the shareability of this repository?
  • +
  • Am I using a .gitignore?
  • +
+

GitHub now offers the Git Large File Storage ( Git LFS): the system works by storing references to the file in your repository, but not the file itself -- it creates a pointer file within the repo, and stores the file elsewhere. If you were to clone the repository, the pointer file will act as a map to show you how to obtain the original file.

+

Git LFS data upload limits are based on your GitHub subscription:

+
    +
  • 2 GB for GitHub free and GitHub Pro
  • +
  • 4 GB for GitHub Team
  • +
  • 5 GB for GitHub Enterprise Cloud
  • +
+
+

gitlfs +

A depiction of how the Git LFS pointer-repository relationship works.

+
+
+

Useful GitHub Features

+

At its core, GitHub is just a place to host your Git repositories. +However, it offers a lot of functionality that has less to do with Git, +and more to do with Project Management. We will +walk through a few of these useful features.

+
    +
  • +

    Issues

    +
      +
    • Issues let you plan out changes and suggestions to a repo
    • +
    • Closing/reopening
    • +
    • Labels
    • +
    • Assigning
    • +
    • Templates
    • +
    • Numbering/mentioning
    • +
    +
  • +
  • +

    Pull Requests

    +
      +
    • Pull requests are a way to request merging code from one branch to another
    • +
    • typical workflow is for someone to fork a repo, then make a PR from that repo to another
    • +
    • Reviews
    • +
    • Commenting
    • +
    • Merging
    • +
    • Closing issues
    • +
    +
  • +
  • +

    Organizations

    +
      +
    • You can use Organizations to organize sets of repositories
    • +
    • Roles
    • +
    • Teams
    • +
    • GitHub documentation:
    • +
    +
  • +
  • +

    Other neat things

    +
      +
    • Permissions/collaborators
    • +
    • GitHub Classroom
    • +
    • Gists
    • +
    • CSV and map rendering
    • +
    • Code editor
    • +
    +
  • +
+
+

Beyond Git and GitHub

+

There are other platforms that address Version Control and have similar functionalities to GitHub:

+
    +
  • +

    GitLab: An alternative to GitHub, GitLab offers both a cloud-hosted platform and a self-hosted option (GitLab CE/EE). It provides a comprehensive DevOps platform with built-in CI/CD, container registry, and more.

    +
  • +
  • +

    Bitbucket: Atlassian's Bitbucket is a Git repository hosting service that also supports Mercurial repositories. It offers integration with Jira, Confluence, and other Atlassian products.

    +
  • +
  • +

    SourceForge: A platform that provides Git and Subversion hosting, as well as tools for project management, issue tracking, and collaboration.

    +
  • +
  • +

    AWS CodeCommit: Part of Amazon Web Services (AWS), CodeCommit is a managed Git service that integrates seamlessly with other AWS services.

    +
  • +
  • +

    Azure DevOps Services (formerly VSTS)): Microsoft's Azure DevOps Services offers Git repository hosting along with a wide range of DevOps tools for planning, developing, testing, and deploying software.

    +
  • +
  • +

    Mercurial: Like Git, Mercurial is a distributed version control system, but with a different branching and merging model. It's an alternative to Git for version control.

    +
  • +
+
+

Self Assessment

+
+True or False: Using Git requires a GitHub account +
+

False

+

Git is open source software.

+

GitHub is a privately owned (Microsoft) company

+

Other platforms like GitLab, GitBucket, and GNU Savannah all offer Git as a version control system service.

+
+
+
+True or False: Using Git is easy +
+

False

+

Using Git can be frustrating to even the most experienced users

+
+
+
+When you find a new repository on GitHub that you think can help your research, what are the first things you should do? +
+

Look at the README.md

+

Most GitHub repositories have a README.md file which explains what you're looking at.

+
+
+

Look at the LICENSE

+

Not all repositories are licensed the same way - be sure to check the LICENSE file to see whether the software is open source, or if it has specific requirements for reuse.

+
+
+ +
+
+ + + Last update: + 2023-10-18 + + +
+ + + + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/06_reproducibility_i/06_reproducibility_i.md b/06_reproducibility_i/06_reproducibility_i.md new file mode 100644 index 000000000..70116a4b5 --- /dev/null +++ b/06_reproducibility_i/06_reproducibility_i.md @@ -0,0 +1,663 @@ +# Repeatability and Reproducibility + +!!! Success "Learning Objectives" + + After this lesson, you should be able to: + + * Describe what reproducibility is + * Discriminate between reproducibility, replicability, and repeatability + * Explain why reproducible research is valuable + * Set up a software project with an environment + +The so-called reproducibility crisis (see [1](https://www.nature.com/articles/533452a) , [2](https://www.nature.com/collections/prbfkwmwvz) , [3](https://blogs.scientificamerican.com/observations/to-fix-the-reproducibility-crisis-rethink-how-we-do-experiments/)) is something you have +probably heard about (and maybe one of the reasons you have come to FOSS). +Headlines in the media (such as [Most scientists can't replicate studies by their peers](https://www.bbc.com/news/science-environment-39054778)) +definitely give pause to researchers and ordinary citizens who hope +that the science used to recommend a course of medical treatment or design +self-driving cars is sound. + +## Software Dependency Hell + +Think for a moment about all the branching possibilities for how a computer could be set up: + +- hardware: CPUs, GPUs, RAM +- Operating system: many flavors of Linux, MacOS, Windows +- Software versions: R, Python, etc. +- Package versions: specific R or Python packages, etc. + +Simply trying to get the same setup as anyone else is difficult enough, but you can also run into all sorts of dependencies. Let's say you try to update a package to match the version someone else used for a project. However, after updating it, you realize you need to update 3 other packages. After that, you realize you need a newer version of R. 
You finally manage to get everything set up, but when you go back to a different project the next week, nothing works! All those updates made your code for your other project break. You spend a week fixing your code to work with the newer software, and you're finally done... but now your advisor gives you a dataset 10x the size and says you'll need to run it on the cloud. You throw your laptop out the window and move to the woods to live the life of a hermit. + +All jokes aside, dealing with software dependencies can be extremely frustrating, and so can setting stuff up on a remote location. It can be even more frustrating if you're trying to reproduce results but you don't actually know the entire software stack used to generate them. + + +## Defining Reproducibility + +!!! Question + + How do you define reproducible science? + + ??? success "Answer" + In [Reproducibility vs. Replicability](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5778115/), Hans Plesser gives the following useful definitions: + + - **Repeatability** (Same team, same experimental setup): The measurement + can be obtained with stated precision by the same team using the same + measurement procedure, the same measuring system, under the same operating + conditions, in the same location on multiple trials. For computational + experiments, this means that a researcher can reliably repeat her own + computation. + - **Replicability** (Different team, same experimental setup): The + measurement can be obtained with stated precision by a different team + using the same measurement procedure, the same measuring system, under the + same operating conditions, in the same or a different location on multiple + trials. For computational experiments, this means that an independent group + can obtain the same result using the author's own artifacts. 
+ - **Reproducibility** (Different team, different experimental setup): The + measurement can be obtained with stated precision by a different team, + a different measuring system, in a different location on multiple trials. + For computational experiments, this means that an independent group can + obtain the same result using artifacts which they develop completely + independently. + + The paper goes on to further simplify: + + - **Methods reproducibility**: provide sufficient detail about procedures + and data so that the same procedures could be exactly repeated. + - **Results reproducibility**: obtain the same results from an independent + study with procedures as closely matched to the original study as + possible. + - **Inferential reproducibility**: draw the same conclusions from either an + independent replication of a study or a reanalysis of the original study. +!!! question "Discussion Question" + How do these definitions apply to your research/teaching? + + Work with your fellow learners to develop a shortlist of ways reproducibility + relates to your work. Try to identify challenges and even successes you'd + like to share. + +Often, when we say "reproducibility" we mean all or at least several of the +concepts the proceeding discussion encompasses. Really, reproducibility can be +thought of as set values such as some laboratories express in a code of conduct, +see for example [Ross-Ibarra Lab code of conduct](https://rilab.ucdavis.edu/expectations.html) or [Bahlai Lab Policies](https://github.com/BahlaiLab/Policies/blob/master/Project_completion.md). +Reproducibility comes from our obligations and desires to work ethically, +honestly, and with confidence that the data and knowledge we produce is done +has integrity. Reproducibility is also a "spectrum of practices", not a +single step. (See figure below from [Peng, 2011](https://science.sciencemag.org/content/334/6060/1226)). 
+
+![spectrum](assets/reproducibility-spectrum.png)
+
+Assuming you have taken in the potentially anxiety inducing information above,
+the most important thing to know is that there is a lot of help to make
+reproducibility a foundation of all of your research.
+
+## Repeatability: a first step
+
+A big first step on the road to reproducibility is **repeatability**. In the context of computation, this means that you should be able to reliably generate the same results.
+
+In many ways, this is the biggest hurdle to reproducibility, as it often requires the biggest leap in skills. You can think of repeatability in a few ways.
+
+
+!!! question "Discussion Question"
+    **Have you ever had any hurdles to reproducing your work?**
+
+    - Have you ever run into a problem that prevented you from generating the same results, figures, analyses as before?
+    - Have you ever lost time trying to figure out how you (or a collaborator) got a particular result?
+    - What were the issues you ran into, and how might you have solved them?
+
+## Strategies for Improving Repeatability
+
+### Automation
+
+In the process of making your work more repeatable, you will often be trying to reduce the amount of work you're doing "by hand". Reducing the human input necessary at each step of a project is a key to reliably reproducing the same results, but it can also help save you a lot of time in the long run.
+
+Have you ever manually edited a figure for a manuscript, only to be asked to change something that negated all your manual edits? Well, in the short run, it may have been quicker to just tinker with the graph by hand, but in the long run, figuring out how to use code to generate the whole thing would have saved you time.
+
+Automating tasks often comes with an up-front cost, but it is important for the eventual reproducibility of the work, and will often save you time in the long run. 
+ +Automation also tends to make tasks scale more easily (editing 10 rows of data by hand is fine, editing 10,000 is much harder), adapt to new scenarios, and extend to future projects. + +!!! question "Discussion Question" + **What are some tasks you have automated or want to automate?** + + - Have you ever successfully automated a task? + - Found a way to make something scale or take less time? + - What was the task, and how did you do it? + - Are there any things you wish you could automate? + - What are some barriers to automating them? + +While we often think about writing scripts to clean data, run analyses, and generate figures, there are even more parts of a research project that can be automated. Here are a few examples: + +- data validation +- model checking/validation +- software installation +- report/manuscript generation +- citation management +- email/GitHub/Slack notifications +- workflow itself (using things like make, Snakemake, Nextflow, targets) + +Code can be thought of as a set of machine-actionable instructions, or instructions that we write for a computer to follow. What other sets of instructions do you have, either written down or in your head? How can you turn them into something machine-actionable? + +### Get off your own machine + +More and more work is being done somewhere other than a personal computer. This could be an HPC cluster at a university or a cloud computing provider. "Cloud" just means somebody else is handling the computers, and you get to use them when you need to, typically for a price. Some 'cloud' options include: Binder, Colab, and Cyverse VICE and Github Codespace. + +The take home message on Cloud is that it is a great way to make your work more reproducible, as you can share a link to your work, and anyone can run it without having to install anything. + +### Software Management + +Have you ever tried to run a script, only to realize you had updated a package without knowing, and now the script doesn't work? 
+
+**Package managers** that create and manage **custom environments** can be extremely helpful in keeping software versions aligned with projects.
+
+In Python, it is common to use `pip` and a `requirements.txt` file, and in R, the `renv` package can be used to keep package versions stable within individual projects.
+
+---
+## Conda
+
+[Conda](https://docs.conda.io/en/latest/){target=_blank} is an open-source package management system and also an environment management system. This means that it helps manage libraries and dependencies within different projects and can isolate different versions of packages and even Python itself into different environments to maintain project consistency and avoid conflicts between package versions.
+
+Here's a breakdown of what Conda offers:
+
+* **Environment Management:**
+
+    Conda allows users to create isolated environments for their projects. Each environment can have its own set of packages, dependencies, and even its own version of Python. This ensures that different projects can have their own specific requirements without interfering with each other. It allows for consistent and reproducible results across different systems and setups.
+
+* **Package Management:**
+
+    Beyond managing environments, Conda is also a package manager. It can install specific versions of software packages and ensure that all dependencies are met. While it's commonly associated with Python, Conda can also manage packages from other languages. You can search for packages at https://anaconda.org/.
+
+* **Cross-Platform:**
+
+    Conda is platform-agnostic. This means you can use it across various operating systems like Windows, macOS, and Linux.
+
+* **Repository Channels:**
+
+    Conda packages are retrieved from repositories known as channels. The default channel has a wide array of commonly used packages. However, users can add third-party channels, such as "conda-forge", to access even more packages or specific versions of packages. 
You can specify the channel by using the `-c` flag when installing packages. + +* **Integration with Anaconda:** + + Conda is the package and environment manager for the Anaconda distribution, which is a distribution of Python and R for scientific computing and data science. However, Conda can be used independently of Anaconda. + + + +## Reproducibility tutorial + +This section is going to cover a short tutorial on reproducibility using software, tools and practices discussed today and throughout FOSS. + +!!! Note "OS of choice" + + This tutorial will be performed using the [CyVerse CLI (Command Line Interface)](https://de.cyverse.org/apps/de/5f2f1824-57b3-11ec-8180-008cfa5ae621). However, if you'd like to use your own computer feel free to! If you're on Mac or Linux, open your terminal; If you're on Windows, use the Windows Subsystem for Linux (WSL) + + ??? Tip "How to Scroll in Cyverse(Tmux) Cloud Shell" + + If you're using the Cyverse Cloud Shell, you can scroll up and down by pressing `Ctrl + b` and then `[` to enter scroll mode. You can then use the arrow keys to scroll up and down. Press `q` to exit scroll mode. + +!!! Success "Tutorial Goals" + + - Create a small workflow using NextFlow + - Understand best practices for reproducing a workflow + - Apply FOSS procedures in order to enable easiness of reproducibility + +### Prerequisites + +What you'll be using: + +- [GitHub](https://github.com/) (already installed) +- [Conda](https://docs.conda.io/en/latest/) +- [Mamba](https://mamba.readthedocs.io/en/latest/installation.html) (optional, recommended) + +Installable through Conda/Mamba: + +- [Nextflow](https://www.nextflow.io/docs/latest/index.html) +- [Salmon](https://salmon.readthedocs.io/en/latest/) +- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) + +Installabe through Pip: + +- [MultiQC](https://multiqc.info/docs/) + +??? Question "What's a Conda and how do I install it?" 
+
+    [Conda](https://docs.conda.io/en/latest/) is a popular tool for installing software. Typically software you want to use requires other software (dependencies) to be installed. Conda can manage all of this for you. Each available Conda package is part of a “recipe” that includes everything you need to run your software. There are different versions of Conda, including some specific for bioinformatics like [Bioconda](https://bioconda.github.io/).
+
+    The CyVerse CLI already comes with Conda installed; Please follow these steps in order to install [MiniConda](https://docs.conda.io/en/latest/miniconda.html) (the lightweight version of Conda) on your system.
+
+    For the appropriate installation package, visit https://docs.conda.io/en/latest/miniconda.html. :warning: Note: **If you are using the WSL, install the Linux version!!**
+
+    ```
+    # Download conda and add right permissions
+    wget https://repo.anaconda.com/miniconda/Miniconda3-py39_4.12.0-Linux-x86_64.sh # Modify this to match the OS you're using.
+    chmod +x Miniconda3-py39_4.12.0-Linux-x86_64.sh
+
+    # install conda silently (-b) and update (-u) and initial conda run
+    ./Miniconda3-py39_4.12.0-Linux-x86_64.sh -b -u
+    ~/miniconda3/bin/conda init
+
+    # Restart bash so that conda is activated
+    source ~/.bashrc
+    ```
+
+    You'll be able to tell when conda is active when `(base)` is present next to the shell prompt such as
+
+    ```
+    (base) user@machine
+    ```
+
+    Conda should now be installed and can be used to install other necessary packages!
+
+When you start a Cyverse Cloud shell, it will start you in the directory:
+```
+/home/user/work
+```
+Let's change to the Data-Store directory, where we will be working for the rest of the tutorial. This is the Cyverse cloud-storage directory where you can put all your data and files.
+
+```
+cd home
+cd 
+```
+Create our own environment (select `y` when prompted). 
+ +``` +conda create --name myenv +``` + +Activate your new environment with + +``` +conda activate myenv +``` + +You can see the list of environments you can activate by doing + +``` +conda env list +``` + +### Package management with Conda + +We are going to to use conda to install Mamba, NextFlow, Salmon and FastQC. + +``` +# Activate Conda using +conda activate + +# Install Mamba +conda install -c conda-forge mamba +``` + +You can either use the anaconda website to search for packager, or use the [conda search feature](https://docs.conda.io/projects/conda/en/latest/commands/search.html) (but also, Google is your best friend.) + +!!! Note "Makes things faster with Mamba" + + Mamba is a reimplemetation of Conda using the C++ language, allowing for much quicker Conda experience. The tutorial is going to use **Mamba** instead of **Conda**, but you can always replace `mamba` with `conda`! + +!!! Info "Conda channels" + + Conda operates through **channels**, specififc repositories where packages are stored. Specific packages sometimes may appear in multiple channels, however it is always helpful to specify a channel with the `-c` flag. + +Install Nextflow and verify its installation with the following commands: + +``` +# Install NextFlow +mamba install -c bioconda nextflow=22.10.6 # Press y when prompted with [Y/n]! + +# verify the installation +nextflow -version +``` + +Now that you know how to install packages with Conda/Mamba, install Salmon and FastQC. + +??? Question "Installing Packages" + As an exercise, install Salmon and FastQC using Conda/Mamba. + + ??? Tip "Need a hand?" + ``` + mamba install -c bioconda salmon + mamba install -c bioconda fastqc + ``` + + Or you can do it with a single line (doable if packages are from the *same* channel)! 
+ ``` + mamba install -c bioconda salmon fastqc + ``` + + +You can view the installed conda packages by doing + +``` +conda list +``` + +In order to make your environment reproducible, conda allows you to export your environment. + +``` +conda env export > my_conda_env.yml +``` + +### Package management with Pip + +Pip works similarly to Conda, as Pip is the package management supported by the Python Software foundation. If you use Python for your work it is likely you have installed packages using Pip. + +We only have to install a single package required for this tutorial, MultiQC. To install MultiQC using Pip, do: + +``` +pip install multiqc +``` + +Similar to Conda, you can export your pip environment by doing + +``` +pip3 freeze > my_pip_env.txt +``` + +!!! Note "Why `pip3`?" + `pip3 freeze > my_pip_env.txt` is used to export the pip environment such that it is readable for Python 3. If you want to export an environment for Python 2, you can use `pip freeze > my_pip_env.txt`. + +!!! Success "Conda exports your Pip environment as well" + Exporting your environment using Conda (`conda env export > my_conda_env.yml`) will **ALSO** export your pip environment! + +### GitHub repository setup and documentation + +Create a repository on GitHub to document your work: + +- On [GitHub](https://github.com/), navigate to your account page and create a new repository (add a README to create structure!) +- Clone your repository locally with `git clone .git` (find the url under the green **Code** button) +- Navigate to your cloned repository with `cd `. You should now be inside your repository. +- Move your environemnt files into your repository with `mv ../my_conda_env.yml ../my_pip_env.txt .`. +- Modify your README to reflect the work so far, with meaningful comments (remember that the README is formatted with markdown, a guide to markdown [here](https://www.markdownguide.org/basic-syntax/)). 
A well documented README may look similar to:
+
+````
+# reproducibility-tutorial
+
+This repository contains information about the reproducibility tutorial from [FOSS 2023 Spring](https://foss.cyverse.org/06_reproducibility_i/#reproducibility-tutorial).
+
+## Environment Setup
+
+- Download conda and add right permissions
+```
+wget https://repo.anaconda.com/miniconda/Miniconda3-py39_4.12.0-Linux-x86_64.sh
+chmod +x Miniconda3-py39_4.12.0-Linux-x86_64.sh
+```
+- Install conda silently (-b), update (-u) and initial start
+```
+./Miniconda3-py39_4.12.0-Linux-x86_64.sh -b -u
+~/miniconda3/bin/conda init
+```
+- Restart bash so that conda is activated
+```
+source ~/.bashrc
+```
+- Install Mamba
+```
+conda install -c conda-forge mamba
+```
+- Use environment files in this repo to recreate tutorial env
+```
+mamba install -f # Will also install pip packages
+```
+
+## Obtaining tutorial files
+
+Tutorial files available [here](https://github.com/CyVerse-learning-materials/foss/blob/mkdocs/docs/assets/tutorials/nf_foss_tut.tar.gz?raw=true). Use `wget` to download appropriate files and decompress files with `tar -xvf`.
+```
+wget -O nf_foss_tut.tar.gz https://github.com/CyVerse-learning-materials/foss/blob/mkdocs/docs/assets/tutorials/nf_foss_tut.tar.gz?raw=true
+tar -xvf nf_foss_tut.tar.gz
+```
+
+## Workflow tutorial using Nextflow
+Steps of the nextflow tutorial will be added in future commits.
+````
+
+- Add, commit and push your changes
+
+```
+git add .
+git commit -m "adding initial documentation"
+git push
+```
+
+- When trying to `commit` git will ask who you are:
+```
+git config --global user.email "you@example.com"
+git config --global user.name "Your Name"
+```
+
+- When trying to `push`, Github will ask for your username and password; When asked about the password, input a GitHub token. 
To create a token go to **Account > Settings > Developer settings > Personal access tokens > Generate new token**, add a note, select all the necessary permissions and select Generate token; **Copy the token and use it as password!** [FOSS has covered how to create a Token in Week 0: The Shell and Git, necessary in order to modify code locally](https://foss.cyverse.org/00_basics/#adding-code-locally). + +### Workflow Tutorial using Nextflow + +!!! Info "...what are we doing?" + + In this tutorial (now that we have set up the environment, repository and pushed our first commit) we are going to: + + - Index a [transcriptome](https://en.wikipedia.org/wiki/Transcriptome) file and quantification of DNA reads (using [Salmon](https://salmon.readthedocs.io/en/latest/)). + - Perform quality controls (with [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)). + - Create a [MultiQC](https://multiqc.info/docs/) report. + +!!! Warning "I Don't know DNA stuff, can I still do this?" + + Absolutely yes! This tutorial is supposed to introduce you to the process of reproducibility using GitHub repositories, package managers and workflow managers! You do not need to understand what each file is as this is meant to show how to make your science reproducible. Focus on understanding the process and theory behind the tutorial rather than the files themselves :fontawesome-regular-face-smile-beam:. + +Nextflow is a workflow manager, similar to [Snakemake](https://snakemake.readthedocs.io/en/stable/). For this tutorial, we decided to use Nextflow as it is easier to learn, more intuitive and user friendly than Snakemake. 
+
+Download the required files using `wget` and `tar` to decompress them
+
+```
+wget -O nf_foss_tut.tar.gz https://github.com/CyVerse-learning-materials/foss/blob/mkdocs/docs/assets/tutorials/nf_foss_tut.tar.gz?raw=true
+tar -xvf nf_foss_tut.tar.gz
+```
+
+We can now look at the decompressed directory structure by using `tree nf_foss_tut` (if you do not have `tree` installed, you can install it with `sudo apt-get install tree` or `mamba install -c conda-forge tree`).
+
+```
+.
+├── nf_foss_tut
+│   ├── data
+│   │   └── ggal
+│   │       ├── gut_1.fq
+│   │       ├── gut_2.fq
+│   │       ├── liver_1.fq
+│   │       ├── liver_2.fq
+│   │       ├── lung_1.fq
+│   │       ├── lung_2.fq
+│   │       └── transcriptome.fa
+│   ├── example_script.nf
+│   ├── script1.nf
+│   ├── script2.nf
+│   └── script3.nf
+
+2 directories, 11 files
+```
+!!! Info "Files information"
+
+    - Scripts 1 through 3 (`script.nf`) and `example_script.nf` are the NextFlow files
+    - `.fq` are fastq files, containing DNA sequences and quality scores
+    - `transcriptome.fa` is all of the RNA data from the organism (*G.gallus*)
+
+Let's look at one of the NextFlow files (`.nf`)
+
+!!! Info "Understanding the Nextflow syntax"
+
+    Nextflow is a powerful workflow manager as it can be deployed on HPCs and Clouds. However, it does require a little effort in order to understand its syntax.
+
+    The syntax is broken down into:
+
+    - Defining parameters early
+    - Defining Processes to be executed
+    - Defining Channels (blocks that work asynchronously that encapsulate other processes)
+
+    More complex scripts include [operators](https://www.nextflow.io/docs/latest/operator.html) (channel manipulation) and [executors](https://www.nextflow.io/docs/latest/executor.html) (to run things on the cloud and HPC); Nextflow can also be used to run and orchestrate [containers](https://www.nextflow.io/docs/latest/container.html). 
+ + As a good example, let's look at `example_script.nf`: + ``` + /* + * pipeline input parameters + */ + params.reads = "$baseDir/data/ggal/gut_{1,2}.fq" # + params.transcriptome = "$baseDir/data/ggal/transcriptome.fa" # The parameters are set early in the script + params.multiqc = "$baseDir/multiqc" # + params.outdir = "results" # + + println """\ # + R N A S E Q - N F P I P E L I N E # + =================================== # + transcriptome: ${params.transcriptome} # Print statement that will show once the script + reads : ${params.reads} # is executed + outdir : ${params.outdir} # + """ # + .stripIndent() # + + /* + * create a transcriptome file object given then transcriptome string parameter + */ + transcriptome_file = file(params.transcriptome) # Convert input file to string + + /* + * define the `index` process that create a binary index + * given the transcriptome file + */ + process index { # First process, named "index" + conda "bioconda::salmon" # Defines what package is necessary + # + input: ## + file transcriptome from transcriptome_file ## + ## Defines the input and output of the process + output: ## + file 'index' into index_ch ## + # + script: # + """ # + salmon index --threads $task.cpus -t $transcriptome -i index # Command to execute + """ # + } # + + + Channel # Channels allows for scripts to work asynchronously, without waiting for received process. + .fromFilePairs( params.reads ) # .fromFilePairs method creates a channel emitting the file pairs matching a "glob" pattern provided by the user. + .ifEmpty { error "Cannot find any reads matching: ${params.reads}" } # .ifEmpty emits a value specified if no input is found. + .set { read_pairs_ch } # .set operator assigns the channel to a variable whose name is specified as a closure parameter. 
+ # + process quantification { # Second process, named "quantification" + conda "bioconda::salmon" # Defines what package is necessary + input: # + file index from index_ch ## + set pair_id, file(reads) from read_pairs_ch ## + ## Defines the input and output of the process + output: ## + file(pair_id) into quant_ch ## + # + script: # + """ # + salmon quant --threads $task.cpus --libType=U -i index -1 ${reads[0]} -2 ${reads[1]} -o $pair_id # Command to execute + """ # + } + ``` + + Nextflow has in-depth documentation that can be found [here](https://www.nextflow.io/docs/latest/). + +The 3 scripts' tasks are: + +- Script 1 creates the transcriptome index file, necessary for downstream processes. +- Script 2 collects read files by pairs (fastq files come in pairs) and performs quantification. +- Script 3 performs quality control and summarizes all findings in a single report. + +#### Script 1: Indexing transcriptome + +Execute script 1 +``` +nextflow run script1.nf +``` + +The output will be something similar to +``` +N E X T F L O W ~ version 22.10.6 +Launching `script1.nf` [admiring_banach] DSL1 - revision: 66baaf0091 +R N A S E Q - N F P I P E L I N E +=================================== +transcriptome: /home/user/work/folder/nf_foss_tut/data/ggal/transcriptome.fa +reads : /home/user/work/folder/nf_foss_tut/data/ggal/*_{1,2}.fq +outdir : results + +executor > local (1) +[f0/0a72bc] process > index [100%] 1 of 1 ✔ +``` +This is Nextflow's way of telling you that the process has been executed and completed. You should now have a new folder called `work`. Execute `tree work` to see what is inside the folder. 
+``` +work +└── f0 + └── 0a72bc4d10dba1df2899b0449519e9 + ├── index + │ ├── duplicate_clusters.tsv + │ ├── hash.bin + │ ├── header.json + │ ├── indexing.log + │ ├── quasi_index.log + │ ├── refInfo.json + │ ├── rsd.bin + │ ├── sa.bin + │ ├── txpInfo.bin + │ └── versionInfo.json + └── transcriptome.fa -> /home/user/work/folder/nf_foss_tut/data/ggal/transcriptome.fa + +3 directories, 11 files +``` +These are new index files from the transcriptome provided. + +#### Script 2: collecting pairs and performing quantification + +Execute with +``` +nextflow run script2.nf -resume --reads 'data/ggal/*_{1,2}.fq' +``` + +The output should look like +``` +N E X T F L O W ~ version 22.10.6 +Launching `script2.nf` [stupefied_swirles] DSL2 - revision: d3b0d0121c +R N A S E Q - N F P I P E L I N E +=================================== +transcriptome: /home/user/work/folder/nf_foss_tut/data/ggal/transcriptome.fa +reads : data/ggal/*_{1,2}.fq +outdir : results + +executor > local(3) +[c1/6ece54] process > index [100%] 1 of 1, cached: 1 ✔ +[1b/10b8d5] process > quantification (1) [100%] 3 of 3 ✔ +``` + +#### Script 3: QC and report + +Execute with +``` +nextflow run script3.nf -resume --reads 'data/ggal/*_{1,2}.fq' +``` + +The output should look like +``` +N E X T F L O W ~ version 22.10.6 +Launching `script3.nf` [voluminous_goodall] DSL1 - revision: d118356290 +R N A S E Q - N F P I P E L I N E +=================================== +transcriptome: /home/user/work/folder/nf_foss_tut/data/ggal/transcriptome.fa +reads : data/ggal/*_{1,2}.fq +outdir : results + +executor > local (4) +[c1/6ece54] process > index [100%] 1 of 1, cached: 1 ✔ +[7a/4e9ce4] process > quantification (lung) [100%] 3 of 3, cached: 3 ✔ +[34/d60dbb] process > fastqc (FASTQC on lung) [100%] 3 of 3 ✔ +[e9/e7c392] process > multiqc [100%] 1 of 1 ✔ + +Done! Open the following report in your browser --> results/multiqc_report.html +``` + +As you can notice, the report is an `html` file that can be opened with a browser. 
Navigate to this file in the Cyverse Data Store and open it. + +### Document your work + +**Document your work.** You should still be in your GitHub folder. Summarize your steps and work on your README file, and push your changes! This will ensure that your work and files are saved and have a valid version that you can come back to in the future if you ever require to. + +??? Question "What if my files are too big?" + + You can always use a `.gitignore`, a file that within itself has defined what should be saved in GitHub when pushing a commit, and what shouldn't be saved. An alternative is to move your files outside of the respository that you're pushing ("stashing"). + +--- + diff --git a/06_reproducibility_i/index.html b/06_reproducibility_i/index.html new file mode 100644 index 000000000..b2995cff1 --- /dev/null +++ b/06_reproducibility_i/index.html @@ -0,0 +1,1869 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 6. Reproducibility I: Repeatability - CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Repeatability and Reproducibility

+
+

Learning Objectives

+

After this lesson, you should be able to:

+
    +
  • Describe what reproducibility is
  • +
  • Discriminate between reproducibility, replicability, and repeatability
  • +
  • Explain why reproducible research is valuable
  • +
  • Set up a software project with an environment
  • +
+
+

The so-called reproducibility crisis (see 1 , 2 , 3) is something you have +probably heard about (and maybe one of the reasons you have come to FOSS). +Headlines in the media (such as Most scientists can't replicate studies by their peers) +definitely give pause to researchers and ordinary citizens who hope +that the science used to recommend a course of medical treatment or design +self-driving cars is sound.

+

Software Dependency Hell

+

Think for a moment about all the branching possibilities for how a computer could be set up:

+
    +
  • hardware: CPUs, GPUs, RAM
  • +
  • Operating system: many flavors of Linux, MacOS, Windows
  • +
  • Software versions: R, Python, etc.
  • +
  • Package versions: specific R or Python packages, etc.
  • +
+

Simply trying to get the same setup as anyone else is difficult enough, but you can also run into all sorts of dependencies. Let's say you try to update a package to match the version someone else used for a project. However, after updating it, you realize you need to update 3 other packages. After that, you realize you need a newer version of R. You finally manage to get everything set up, but when you go back to a different project the next week, nothing works! All those updates made your code for your other project break. You spend a week fixing your code to work with the newer software, and you're finally done... but now your advisor gives you a dataset 10x the size and says you'll need to run it on the cloud. You throw your laptop out the window and move to the woods to live the life of a hermit.

+

All jokes aside, dealing with software dependencies can be extremely frustrating, and so can setting stuff up on a remote location. It can be even more frustrating if you're trying to reproduce results but you don't actually know the entire software stack used to generate them.

+

Defining Reproducibility

+
+

Question

+
How do you define reproducible science?
+
+
+Answer +

In Reproducibility vs. Replicability, Hans Plesser gives the following useful definitions:

+
    +
  • Repeatability (Same team, same experimental setup): The measurement + can be obtained with stated precision by the same team using the same + measurement procedure, the same measuring system, under the same operating + conditions, in the same location on multiple trials. For computational + experiments, this means that a researcher can reliably repeat her own + computation.
  • +
  • Replicability (Different team, same experimental setup): The + measurement can be obtained with stated precision by a different team + using the same measurement procedure, the same measuring system, under the + same operating conditions, in the same or a different location on multiple + trials. For computational experiments, this means that an independent group + can obtain the same result using the author's own artifacts.
  • +
  • Reproducibility (Different team, different experimental setup): The + measurement can be obtained with stated precision by a different team, + a different measuring system, in a different location on multiple trials. + For computational experiments, this means that an independent group can + obtain the same result using artifacts which they develop completely + independently.
  • +
+

The paper goes on to further simplify:

+
    +
  • Methods reproducibility: provide sufficient detail about procedures + and data so that the same procedures could be exactly repeated.
  • +
  • Results reproducibility: obtain the same results from an independent + study with procedures as closely matched to the original study as + possible.
  • +
  • Inferential reproducibility: draw the same conclusions from either an + independent replication of a study or a reanalysis of the original study.
  • +
+
+
+
+

Discussion Question

+
How do these definitions apply to your research/teaching?
+
+Work with your fellow learners to develop a shortlist of ways reproducibility
+relates to your work. Try to identify challenges and even successes you'd
+like to share.
+
+
+

Often, when we say "reproducibility" we mean all or at least several of the
+concepts the preceding discussion encompasses. Really, reproducibility can be
+thought of as set values such as some laboratories express in a code of conduct,
+see for example Ross-Ibarra Lab code of conduct or Bahlai Lab Policies.
+Reproducibility comes from our obligations and desires to work ethically,
+honestly, and with confidence that the data and knowledge we produce is done
+with integrity. Reproducibility is also a "spectrum of practices", not a
+single step. (See figure below from Peng, 2011).

+

spectrum

+

Assuming you have taken in the potentially anxiety inducing information above, +the most important thing to know is that there is a lot of help to make +reproducibility a foundation of all of your research.

+

Repeatability: a first step

+

A big first step on the road to reproducibility is repeatability. In the context of computation, this means that you should be able to reliably generate the same results.

+

In many ways, this is the biggest hurdle to reproducibility, as it often requires the biggest leap in skills. You can think of repeatability in a few ways.

+
+

Discussion Question

+

Have you ever had any hurdles to reproducing your work?

+
    +
  • Have you ever run into a problem that prevented you from generating the same results, figures, analyses as before?
  • +
  • Have you ever lost time trying to figure out how you (or a collaborator) got a particular result?
  • +
  • What were the issues you ran into, and how might you have solved them?
  • +
+
+

Strategies for Improving Repeatability

+

Automation

+

In the process of making your work more repeatable, you will often be trying to reduce the amount of work you're doing "by hand". Reducing the human input necessary at each step of a project is a key to reliably reproducing the same results, but it can also help save you a lot of time in the long run.

+

Have you ever manually edited a figure for a manuscript, only to be asked to change something that negated all your manual edits? Well, in the short run, it may have been quicker to just tinker with the graph by hand, but in the long run, figuring out how to use code to generate the whole thing would have saved you time.

+

Automating tasks often comes with an up-front cost, but it is important for the eventual reproducibility of the work, and will often save you time in the long run.

+

Automation also tends to make tasks scale more easily (editing 10 rows of data by hand is fine, editing 10,000 is much harder), adapt to new scenarios, and extend to future projects.

+
+

Discussion Question

+

What are some tasks you have automated or want to automate?

+
    +
  • Have you ever successfully automated a task?
  • +
  • Found a way to make something scale or take less time?
  • +
  • What was the task, and how did you do it?
  • +
  • Are there any things you wish you could automate?
  • +
  • What are some barriers to automating them?
  • +
+
+

While we often think about writing scripts to clean data, run analyses, and generate figures, there are even more parts of a research project that can be automated. Here are a few examples:

+
    +
  • data validation
  • +
  • model checking/validation
  • +
  • software installation
  • +
  • report/manuscript generation
  • +
  • citation management
  • +
  • email/GitHub/Slack notifications
  • +
  • workflow itself (using things like make, Snakemake, Nextflow, targets)
  • +
+

Code can be thought of as a set of machine-actionable instructions, or instructions that we write for a computer to follow. What other sets of instructions do you have, either written down or in your head? How can you turn them into something machine-actionable?

+

Get off your own machine

+

More and more work is being done somewhere other than a personal computer. This could be an HPC cluster at a university or a cloud computing provider. "Cloud" just means somebody else is handling the computers, and you get to use them when you need to, typically for a price. Some 'cloud' options include: Binder, Colab, and Cyverse VICE and Github Codespace.

+

The take home message on Cloud is that it is a great way to make your work more reproducible, as you can share a link to your work, and anyone can run it without having to install anything.

+

Software Management

+

Have you ever tried to run a script, only to realize you had updated a package without knowing, and now the script doesn't work?

+

Package managers that create and manage custom environments can be extremely helpful in keeping software versions aligned with projects.

+

In Python, it is common to use pip and a requirements.txt file, and in R, the renv package can be used to keep package versions stable within individual projects.

+
+

Conda

+

Conda is an open-source package management system and also an environment management system. This means that it helps manage libraries and dependencies within different projects and can isolate different versions of packages and even Python itself into different environments to maintain project consistency and avoid conflicts between package versions.

+

Here's a breakdown of what Conda offers:

+
    +
  • +

    Environment Management:

    +

    Conda allows users to create isolated environments for their projects. Each environment can have its own set of packages, dependencies, and even its own version of Python. This ensures that different projects can have their own specific requirements without interfering with each other. It allows for consistent and reproducible results across different systems and setups.

    +
  • +
  • +

    Package Management:

    +

    Beyond managing environments, Conda is also a package manager. It can install specific versions of software packages and ensure that all dependencies are met. While it's commonly associated with Python, Conda can also manage packages from other languages. You can search for packages at https://anaconda.org/.

    +
  • +
  • +

    Cross-Platform:

    +

    Conda is platform-agnostic. This means you can use it across various operating systems like Windows, macOS, and Linux.

    +
  • +
  • +

    Repository Channels:

    +

    Conda packages are retrieved from repositories known as channels. The default channel has a wide array of commonly used packages. However, users can add third-party channels, such as "conda-forge", to access even more packages or specific versions of packages. You can specify the channel by using the -c flag when installing packages.

    +
  • +
  • +

    Integration with Anaconda:

    +

    Conda is the package and environment manager for the Anaconda distribution, which is a distribution of Python and R for scientific computing and data science. However, Conda can be used independently of Anaconda.

    +
  • +
+

Reproducibility tutorial

+

This section is going to cover a short tutorial on reproducibility using software, tools and practices discussed today and throughout FOSS.

+
+

OS of choice

+

This tutorial will be performed using the CyVerse CLI (Command Line Interface). However, if you'd like to use your own computer feel free to! If you're on Mac or Linux, open your terminal; If you're on Windows, use the Windows Subsystem for Linux (WSL)

+
+How to Scroll in Cyverse(Tmux) Cloud Shell +

If you're using the Cyverse Cloud Shell, you can scroll up and down by pressing Ctrl + b and then [ to enter scroll mode. You can then use the arrow keys to scroll up and down. Press q to exit scroll mode.

+
+
+
+

Tutorial Goals

+
    +
  • Create a small workflow using NextFlow
  • +
  • Understand best practices for reproducing a workflow
  • +
  • Apply FOSS procedures in order to enable ease of reproducibility
  • +
+
+

Prerequisites

+

What you'll be using:

+ +

Installable through Conda/Mamba:

+ +

Installable through Pip:

+ +
+What's a Conda and how do I install it? +

Conda is a popular tool for installing software. Typically software you want to use requires other software (dependencies) to be installed. Conda can manage all of this for you. Each available Conda package is part of a “recipe” that includes everything you need to run your software. There are different versions of Conda, including some specific for bioinformatics like Bioconda.

+

The CyVerse CLI already comes with Conda installed; Please follow these steps in order to install MiniConda (the lightweight version of Conda) on your system.

+

For the appropriate installation package, visit https://docs.conda.io/en/latest/miniconda.html. ⚠ Note: If you are using the WSL, install the Linux version!!

+
# Download conda and add right permissions
+wget https://repo.anaconda.com/miniconda/Miniconda3-py39_4.12.0-Linux-x86_64.sh     # Modify this to match the OS you're using.
+chmod +x Miniconda3-py39_4.12.0-Linux-x86_64.sh
+
+# install conda silently (-b), update (-u), and do the initial conda run
+./Miniconda3-py39_4.12.0-Linux-x86_64.sh -b -u
+~/miniconda3/bin/conda init
+
+# Restart bash so that conda is activated
+source ~/.bashrc
+
+

You'll be able to tell when conda is active when (base) is present next to the shell prompt, such as

+
(base) user@machine
+
+

Conda should now be installed and can be used to install other necessary packages!

+
+

When you start a Cyverse Cloud shell, it will start you in the directory: +

/home/user/work 
+
+Let's change to the Data-Store directory, where we will be working for the rest of the tutorial. This is the Cyverse cloud-storage directory where you can put all your data and files.

+

cd home
+cd <username>
+
+Create our own environment (select y when prompted).

+
conda create --name myenv
+
+

Activate your new environment with

+
conda activate myenv
+
+

You can see the list of environments you can activate by doing

+
conda env list
+
+

Package management with Conda

+

We are going to use conda to install Mamba, NextFlow, Salmon and FastQC.

+
# Activate Conda using 
+conda activate
+
+# Install Mamba
+conda install -c conda-forge mamba
+
+

You can either use the anaconda website to search for packages, or use the conda search feature (but also, Google is your best friend.)

+
+

Makes things faster with Mamba

+

Mamba is a reimplementation of Conda using the C++ language, allowing for a much quicker Conda experience. The tutorial is going to use Mamba instead of Conda, but you can always replace mamba with conda!

+
+
+

Conda channels

+

Conda operates through channels, specific repositories where packages are stored. Specific packages sometimes may appear in multiple channels, however it is always helpful to specify a channel with the -c flag.

+
+

Install Nextflow and verify its installation with the following commands:

+
# Install NextFlow
+mamba install -c bioconda nextflow=22.10.6     # Press y when prompted with [Y/n]!
+
+# verify the installation
+nextflow -version
+
+

Now that you know how to install packages with Conda/Mamba, install Salmon and FastQC.

+
+Installing Packages +

As an exercise, install Salmon and FastQC using Conda/Mamba.

+
+Need a hand? +
mamba install -c bioconda salmon
+mamba install -c bioconda fastqc
+
+

Or you can do it with a single line (doable if packages are from the same channel)! +

mamba install -c bioconda salmon fastqc
+

+
+
+

You can view the installed conda packages by doing

+
conda list
+
+

In order to make your environment reproducible, conda allows you to export your environment.

+
conda env export > my_conda_env.yml
+
+

Package management with Pip

+

Pip works similarly to Conda, as Pip is the package manager supported by the Python Software Foundation. If you use Python for your work it is likely you have installed packages using Pip.

+

We only have to install a single package required for this tutorial, MultiQC. To install MultiQC using Pip, do:

+
pip install multiqc
+
+

Similar to Conda, you can export your pip environment by doing

+
pip3 freeze > my_pip_env.txt
+
+
+

Why pip3?

+

pip3 freeze > my_pip_env.txt uses the pip executable associated with Python 3, ensuring you export the packages installed for your Python 3 environment. On systems where pip points to Python 2, pip freeze > my_pip_env.txt would export the Python 2 environment instead.

+
+
+

Conda exports your Pip environment as well

+

Exporting your environment using Conda (conda env export > my_conda_env.yml) will ALSO export your pip environment!

+
+

GitHub repository setup and documentation

+

Create a repository on GitHub to document your work:

+
    +
  • On GitHub, navigate to your account page and create a new repository (add a README to create structure!)
  • +
  • Clone your repository locally with git clone <repository_url>.git (find the url under the green Code button)
  • +
  • Navigate to your cloned repository with cd <repository_name>. You should now be inside your repository.
  • +
  • Move your environment files into your repository with mv ../my_conda_env.yml ../my_pip_env.txt ..
  • +
  • Modify your README to reflect the work so far, with meaningful comments (remember that the README is formatted with markdown, a guide to markdown here). A well documented document may look similar to:
  • +
+
# reproducibility-tutorial
+
+This repository contains information about the reproducibility tutorial from [FOSS 2023 Spring](https://foss.cyverse.org/06_reproducibility_i/#reproducibility-tutorial).
+
+## Environment Setup
+
+- Download conda and add right permissions
+```
+wget https://repo.anaconda.com/miniconda/Miniconda3-py39_4.12.0-Linux-x86_64.sh
+chmod +x Miniconda3-py39_4.12.0-Linux-x86_64.sh
+```
+- Install conda silently (-b), update (-u) and initial start
+```
+./Miniconda3-py39_4.12.0-Linux-x86_64.sh -b -u
+~/miniconda3/bin/conda init
+```
+-  Restart bash so that conda is activated
+```
+source ~/.bashrc
+```
+- Install Mamba
+```
+conda install -c conda-forge mamba
+```
+- Use environment files in this repo to recreate tutorial env
+```
+mamba install -f <my_conda_env.yml>     # Will also install pip packages
+```
+
+## Obtaining tutorial files
+
+Tutorial files available [here](https://github.com/CyVerse-learning-materials/foss/blob/mkdocs/docs/assets/tutorials/nf_foss_tut.tar.gz?raw=true). Use `wget` to download appropriate files and decompress files with `tar -xvf`.
+```
+wget -O nf_foss_tut.tar.gz https://github.com/CyVerse-learning-materials/foss/blob/mkdocs/docs/assets/tutorials/nf_foss_tut.tar.gz?raw=true
+tar -xvf nf_foss_tut.tar.gz
+```
+
+## Workflow tutorial using Nextflow
+Steps of the nextflow tutorial will be added in future commits.
+
+
    +
  • Add, commit and push your changes
  • +
+
git add .
+git commit -m "adding initial documentation"
+git push
+
+
    +
  • +

    When trying to commit git will ask who you are: +

    git config --global user.email "you@example.com"
    +git config --global user.name "Your Name"
    +

    +
  • +
  • +

    When trying to push, GitHub will ask for your username and password; When asked about the password, input a GitHub token. To create a token go to Account > Settings > Developer settings > Personal access tokens > Generate new token, add a note, select all the necessary permissions and select Generate token; Copy the token and use it as password! FOSS has covered how to create a Token in Week 0: The Shell and Git, necessary in order to modify code locally.

    +
  • +
+

Workflow Tutorial using Nextflow

+
+

...what are we doing?

+

In this tutorial (now that we have set up the environment, repository and pushed our first commit) we are going to:

+ +
+
+

I Don't know DNA stuff, can I still do this?

+

Absolutely yes! This tutorial is supposed to introduce you to the process of reproducibility using GitHub repositories, package managers and workflow managers! You do not need to understand what each file is as this is meant to show how to make your science reproducible. Focus on understanding the process and theory behind the tutorial rather than the files themselves.

+
+

Nextflow is a workflow manager, similar to Snakemake. For this tutorial, we decided to use Nextflow as it is easier to learn, more intuitive and user friendly than Snakemake.

+

Download the required files using wget and tar to decompress them

+
wget -O nf_foss_tut.tar.gz https://github.com/CyVerse-learning-materials/foss/blob/mkdocs/docs/assets/tutorials/nf_foss_tut.tar.gz?raw=true
+tar -xvf nf_foss_tut.tar.gz
+
+

We can now look at the decompressed directory structure by using tree nf_foss_tut (if you do not have tree installed, you can install it with sudo apt-get install tree or mamba install -c conda-forge tree).

+
.
+├── nf_foss_tut
+│   ├── data
+│   │   └── ggal
+│   │       ├── gut_1.fq
+│   │       ├── gut_2.fq
+│   │       ├── liver_1.fq
+│   │       ├── liver_2.fq
+│   │       ├── lung_1.fq
+│   │       ├── lung_2.fq
+│   │       └── transcriptome.fa
+│   ├── example_script.nf
+│   ├── script1.nf
+│   ├── script2.nf
+│   └── script3.nf
+
+2 directories, 11 files
+
+
+

Files information

+
    +
  • Scripts 1 through 3 (script<number>.nf) and example_script.nf are the NextFlow files
  • +
  • <file>.fq are fastq files, containing DNA sequences and quality scores
  • +
  • transcriptome.fa is all of the RNA data from the organism (G.gallus)
  • +
+
+

Let's look at one of the NextFlow files (.nf)

+
+

Understanding the Nextflow syntax

+

Nextflow is a powerful workflow manager as it can be deployed on HPCs and Clouds. However, it does require a little effort in order to understand its syntax.

+

The syntax is broken down into:

+
    +
  • Defining parameters early
  • +
  • Defining Processes to be executed
  • +
  • Defining Channels (blocks that work asynchronously that encapsulate other processes)
  • +
+

More complex scripts include operators (channel manipulation) and executors (to run things on the cloud and HPC); Nextflow can also be used to run and orchestrate containers.

+

As a good example, let's look at example_script.nf: +

/*                                                                                      
+    * pipeline input parameters                                                            
+    */                                                                                      
+    params.reads = "$baseDir/data/ggal/gut_{1,2}.fq"                                        #
+    params.transcriptome = "$baseDir/data/ggal/transcriptome.fa"                            # The parameters are set early in the script
+    params.multiqc = "$baseDir/multiqc"                                                     #
+    params.outdir = "results"                                                               #
+
+    println """\                                                                            #
+            R N A S E Q - N F   P I P E L I N E                                             #
+            ===================================                                             #
+            transcriptome: ${params.transcriptome}                                          # Print statement that will show once the script
+            reads        : ${params.reads}                                                  # is executed
+            outdir       : ${params.outdir}                                                 #
+            """                                                                             #
+            .stripIndent()                                                                  #
+
+    /* 
+     * create a transcriptome file object given the transcriptome string parameter
+     */
+    transcriptome_file = file(params.transcriptome)                                         # Create a file object from the path string
+
+    /* 
+     * define the `index` process that creates a binary index 
+     * given the transcriptome file
+     */
+    process index {                                                                         # First process, named "index"
+    conda "bioconda::salmon"                                                                # Defines what package is necessary
+                                                                                            #
+    input:                                                                                  ## 
+    file transcriptome from transcriptome_file                                              ##
+                                                                                            ## Defines the input and output of the process
+    output:                                                                                 ##
+    file 'index' into index_ch                                                              ##
+                                                                                            #
+    script:                                                                                 #
+    """                                                                                     #
+    salmon index --threads $task.cpus -t $transcriptome -i index                            # Command to execute
+    """                                                                                     #
+    }                                                                                       #
+
+
+    Channel                                                                                 # Channels allow scripts to work asynchronously, without waiting for a preceding process to finish.
+    .fromFilePairs( params.reads )                                                          # .fromFilePairs method creates a channel emitting the file pairs matching a "glob" pattern provided by the user. 
+    .ifEmpty { error "Cannot find any reads matching: ${params.reads}"  }                   # .ifEmpty emits a value specified if no input is found.
+    .set { read_pairs_ch }                                                                  # .set operator assigns the channel to a variable whose name is specified as a closure parameter.
+                                                                                            #                
+    process quantification {                                                                # Second process, named "quantification"
+    conda "bioconda::salmon"                                                                # Defines what package is necessary
+    input:                                                                                  #
+    file index from index_ch                                                                ##
+    set pair_id, file(reads) from read_pairs_ch                                             ##
+                                                                                            ## Defines the input and output of the process
+    output:                                                                                 ##
+    file(pair_id) into quant_ch                                                             ##
+                                                                                            #
+    script:                                                                                 #
+    """                                                                                                     #
+    salmon quant --threads $task.cpus --libType=U -i index -1 ${reads[0]} -2 ${reads[1]} -o $pair_id        # Command to execute
+    """                                                                                                     #
+    }        
+

+

Nextflow has in-depth documentation that can be found here.

+
+

The 3 scripts' tasks are:

+
    +
  • Script 1 creates the transcriptome index file, necessary for downstream processes.
  • +
  • Script 2 collects read files by pairs (fastq files come in pairs) and performs quantification.
  • +
  • Script 3 performs quality control and summarizes all findings in a single report.
  • +
+

Script 1: Indexing transcriptome

+

Execute script 1 +

nextflow run script1.nf
+

+

The output will be something similar to +

N E X T F L O W  ~  version 22.10.6
+Launching `script1.nf` [admiring_banach] DSL1 - revision: 66baaf0091
+R N A S E Q - N F   P I P E L I N E    
+===================================
+transcriptome: /home/user/work/folder/nf_foss_tut/data/ggal/transcriptome.fa
+reads        : /home/user/work/folder/nf_foss_tut/data/ggal/*_{1,2}.fq
+outdir       : results
+
+executor >  local (1)
+[f0/0a72bc] process > index [100%] 1 of 1 ✔
+
+This is Nextflow's way of telling you that the process has been executed and completed. You should now have a new folder called work. Execute tree work to see what is inside the folder. +
work
+└── f0
+    └── 0a72bc4d10dba1df2899b0449519e9
+        ├── index
+        │   ├── duplicate_clusters.tsv
+        │   ├── hash.bin
+        │   ├── header.json
+        │   ├── indexing.log
+        │   ├── quasi_index.log
+        │   ├── refInfo.json
+        │   ├── rsd.bin
+        │   ├── sa.bin
+        │   ├── txpInfo.bin
+        │   └── versionInfo.json
+        └── transcriptome.fa -> /home/user/work/folder/nf_foss_tut/data/ggal/transcriptome.fa
+
+3 directories, 11 files
+
+These are new index files from the transcriptome provided.

+

Script 2: collecting pairs and performing quantification

+

Execute with +

nextflow run script2.nf -resume --reads 'data/ggal/*_{1,2}.fq'
+

+

The output should look like +

N E X T F L O W  ~  version 22.10.6
+Launching `script2.nf` [stupefied_swirles] DSL2 - revision: d3b0d0121c
+R N A S E Q - N F   P I P E L I N E    
+===================================
+transcriptome: /home/user/work/folder/nf_foss_tut/data/ggal/transcriptome.fa
+reads        : data/ggal/*_{1,2}.fq
+outdir       : results
+
+executor >  local(3)                                                                                
+[c1/6ece54] process > index [100%] 1 of 1, cached: 1 ✔                          
+[1b/10b8d5] process > quantification (1) [100%] 3 of 3 ✔
+

+

Script 3: QC and report

+

Execute with +

nextflow run script3.nf -resume --reads 'data/ggal/*_{1,2}.fq'
+

+

The output should look like +

N E X T F L O W  ~  version 22.10.6
+Launching `script3.nf` [voluminous_goodall] DSL1 - revision: d118356290
+R N A S E Q - N F   P I P E L I N E    
+===================================
+transcriptome: /home/user/work/folder/nf_foss_tut/data/ggal/transcriptome.fa
+reads        : data/ggal/*_{1,2}.fq
+outdir       : results
+
+executor >  local (4)
+[c1/6ece54] process > index                   [100%] 1 of 1, cached: 1 ✔
+[7a/4e9ce4] process > quantification (lung)   [100%] 3 of 3, cached: 3 ✔
+[34/d60dbb] process > fastqc (FASTQC on lung) [100%] 3 of 3 ✔
+[e9/e7c392] process > multiqc                 [100%] 1 of 1 ✔
+
+Done! Open the following report in your browser --> results/multiqc_report.html
+

+

As you can notice, the report is an html file that can be opened with a browser. Navigate to this file in the Cyverse Data Store and open it.

+

Document your work

+

Document your work. You should still be in your GitHub folder. Summarize your steps and work on your README file, and push your changes! This will ensure that your work and files are saved and have a valid version that you can come back to in the future if you ever require to.

+
+What if my files are too big? +

You can always use a .gitignore, a file that defines what should be saved in GitHub when pushing a commit, and what shouldn't be saved. An alternative is to move your files outside of the repository that you're pushing ("stashing").

+
+
+ +
+
+ + + Last update: + 2023-10-18 + + +
+ + + + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/07_reproducibility_ii/07_reproducibility_ii.md b/07_reproducibility_ii/07_reproducibility_ii.md new file mode 100644 index 000000000..2789150be --- /dev/null +++ b/07_reproducibility_ii/07_reproducibility_ii.md @@ -0,0 +1,683 @@ +# Reproducibility II: Containers + +!!! Success "Learning Objectives" + + After this lesson, you should be able to: + + * Explain what containers are used for in reproducible research contexts + * Search for and run a Docker container locally or on a remote system + * Understand how version control and data can be used inside a container + +## Containers + +### What *are* containers? + +A [container](https://www.docker.com/resources/what-container/){target=_blank} is a standard unit of software that packages up code and all its dependencies so the application runs quickly and reliably from one computing environment to another. Container images are a lightweight, standalone, executable package of software that includes everything needed to run an application: code, runtime, system tools, system libraries and settings. + +The most common container software is [:material-docker: Docker](https://www.docker.com/){target=_blank}, which is a platform for developers and sysadmins to develop, deploy, and run applications with containers. [Apptainer](https://apptainer.org/docs/user/main/) (formerly, Singularity), is another popular container engine, which allows you to deploy containers on HPC clusters. + +??? Note "Container Avantages" + + Advantages include efficient resource utilization, scalability, and portability. They sepatare the underlying infrastructure, ensuring consistent behavior across different environments and enable better resource management and security by isolating applications from the host system and other containers. + + +Containers are similar to virtual machines (VMs), but are smaller and easier to share. 
A big distinction between Containers and VMs is what is within each environment: VMs require the OS to be present within the image, whilst containers rely on the host OS and the container engine (i.e, Docker Engine). + +
+ ![containerexp](https://cloudblogs.microsoft.com/wp-content/uploads/sites/37/2019/07/Demystifying-containers_image1.png) +
Difference between Virtual Machines and Containers. Containers are a lot more portable as these do not require an OS to be bundled with the software. Figure source: [Microsoft Cloudblogs](https://cloudblogs.microsoft.com/opensource/2019/07/15/how-to-get-started-containers-docker-kubernetes/).
+
+ +Containers can Cross Operating Systems but may have problems between CPU architectures (e.g., x86 vs ARM vs AMD64). + +### Containers for Reproducible Science + +### Use Cases for Containers + +hosting user created containers on [DockerHub](https://hub.docker.com/), and providing a cross-OS user-friendly toolset for container creation and deployment. + +:simple-rstudio: RStudio has a number of [available Docker containers](https://rocker-project.org/images/versioned/rstudio.html), each for different use cases and maintained by the [Rocker Project](https://rocker-project.org/). [:simple-jupyter: Project Jupyter](https://jupyter.org/) also has a number of [maintained docker images](https://hub.docker.com/u/jupyter) accessible through DockerHub. + + + + +??? Note "Alternatively: The Carpentries Introductory Container workshop" + + The Carpentries have an incubator workshop on [Docker Containers](https://carpentries-incubator.github.io/docker-introduction/){target=_blank}. + + Extra: [Containers in Research Workflows](https://carpentries-incubator.github.io/docker-introduction/reproduciblity/index.html){target=_blank} (section 9 from the lesson linked above). + +--- + +## Introduction to :material-docker: Docker + +
+ ![gitlfs](https://cc.cyverse.org/assets/docker/docker.png) +
+ +#### Prerequisites + +In order to complete these exercises we **STRONGLY** recommend that you set up a personal [:material-github: GitHub](https://github.com){target=_blank} and [:material-docker: DockerHub](https://hub.docker.com){target=_blank} account (account creation for both services is free). + +There are no specific skills needed for this tutorial beyond elementary command line ability and using a text editor. + +We are going to be using [:material-github: GitHub CodeSpaces](https://github.com/features/codespaces){target=_blank} for the hands on portion of the workshop, which features [:material-microsoft-visual-studio-code: VS Code](https://code.visualstudio.com/){target=_blank} as a fully enabled development environment with Docker already installed. + +CodeSpaces is a featured product from GitHub and requires a paid subscription or Academic account for access. Your account will temporarily be integrated with the course GitHub Organization for the next steps in the workshop. + +Our instructions on starting a new CodeSpace are [here](../cloud/codespaces.md). + +??? Info "Installing Docker on your personal computer" + + We are going to be using virtual machines on the cloud for this course, and we will explain why this is a good thing, but there may be a time when you want to run Docker on your own computer. + + Installing Docker takes a little time but it is reasonably straight forward and it is a one-time setup. + + Installation instructions from Docker Official Docs for common OS and chip architectures: + + - [:fontawesome-brands-apple: Mac OS X](https://docs.docker.com/docker-for-mac/){target=_blank} + - [:fontawesome-brands-windows: Windows](https://docs.docker.com/docker-for-windows){target=_blank} + - [:fontawesome-brands-ubuntu: Ubuntu Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/){target=_blank} + +??? Failure "Never used a terminal before?" 
+ + Before venturing much further, you should review the [Software Carpentry](https://software-carpentry.org/lessons/){target=_blank} lessons on "The Unix Shell" and "Version Control with Git" -- these are great introductory lessons related to the skills we're teaching here. + + You've given up on ever using a terminal? No problem, Docker can be used from graphic interfaces, like [Docker Desktop](https://www.docker.com/products/docker-desktop/){target=_blank}, or platforms like [Portainer](https://www.portainer.io/){target=_blank}. We suggest you read through their documentation on how to use Docker. + +## Fundamental Docker Commands :octicons-terminal-16: + +Docker commands in the terminal use the prefix `docker`. + +!!! Note "For every command listed, the correct execution of the commands through the command line is by using `docker` in front of the command: for example `docker help` or `docker search`. Thus, every :material-docker: = `docker`." + +### :material-docker: help + +Like many other command line applications the most helpful flag is the `help` command which can be used with the Management Commands: + +``` +$ docker +$ docker --help +``` + +### :material-docker: search + +We talk about the concept of [Docker Registries](registry.md) in the next section, but you can search the public list of registeries by using the `docker search` command to find public containers on the Official [Docker Hub Registry](https://hub.docker.com): + +``` +$ docker search +``` + +### :material-docker: pull + +Go to the [Docker Hub](https://hub.docker.com) and type `hello-world` in the search bar at the top of the page. + +Click on the 'tag' tab to see all the available 'hello-world' images. + +Click the 'copy' icon at the right to copy the `docker pull` command, or type it into your terminal: + +``` +$ docker pull hello-world +``` + +!!! 
Note + If you leave off the `:` and the tag name, it will by default pull the `latest` image + +``` +$ docker pull hello-world +Using default tag: latest +latest: Pulling from library/hello-world +2db29710123e: Pull complete +Digest: sha256:bfea6278a0a267fad2634554f4f0c6f31981eea41c553fdf5a83e95a41d40c38 +Status: Downloaded newer image for hello-world:latest +docker.io/library/hello-world:latest +``` + +Now try to list the files in your current working directory: + +``` +$ ls -l +``` + +??? Question "Where is the image you just pulled?" + + Docker saves container images to the Docker directory (where Docker is installed). + + You won't ever see them in your working directory. + + Use 'docker images' to see all the images on your computer: + + ``` + $ docker images + ``` + +### :material-docker: run + +The single most common command that you'll use with Docker is `docker run` ([see official help manual](https://docs.docker.com/engine/reference/commandline/run/) for more details). + +`docker run` starts a container and executes the default "entrypoint", or any other "command" that follows `run` and any optional flags. + +??? Tip "What is an *entrypoint*?" + + An entrypoint is the initial command(s) executed upon starting the Docker container. It is listed in the `Dockerfile` as `ENTRYPOINT` and can take 2 forms: as commands followed by parameters (`ENTRYPOINT command param1 param2`) or as an executable (`ENTRYPOINT [“executable”, “param1”, “param2”]`) + +``` +$ docker run hello-world:latest +``` + +In the demo above, you used the `docker pull` command to download the `hello-world:latest` image. + +What about if you run a container that you haven't downloaded? + + +``` +$ docker run alpine:latest ls -l +``` + +When you executed the command `docker run alpine:latest`, Docker first looked for the cached image locally, but did not find it, it then ran a `docker pull` behind the scenes to download the `alpine:latest` image and then execute your command. 
+ +When you ran `docker run alpine:latest`, you provided a command `ls -l`, so Docker started the command specified and you saw the listing of the Alpine file system (not your host system, this was insice the container!). + +### :material-docker: images + +You can now use the `docker images` command to see a list of all the cached images on your system: + +``` +$ docker images +REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE +alpine latest c51f86c28340 4 weeks ago 1.109 MB +hello-world latest 690ed74de00f 5 months ago 960 B +``` + +??? Info "Inspecting your containers" + + To find out more about a Docker images, run `docker inspect hello-world:latest` + +### :material-docker: ps + +Now it's time to see the `docker ps` command which shows you all containers that are currently running on your machine. + +``` +docker ps +``` + +Since no containers are running, you see a blank line. + +``` +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +``` + +Let's try a more useful variant: `docker ps --all` + +``` +$ docker ps --all +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a5eab9243a15 hello-world "/hello" 5 seconds ago Exited (0) 3 seconds ago loving_mcnulty +3bb4e26d2e0c alpine:latest "/bin/sh" 17 seconds ago Exited (0) 16 seconds ago objective_meninsky +192ffdf0cbae opensearchproject/opensearch-dashboards:latest "./opensearch-dashbo…" 3 days ago Exited (0) 3 days ago opensearch-dashboards +a10d47d3b6de opensearchproject/opensearch:latest "./opensearch-docker…" 3 days ago Exited (0) 3 days ago opensearch-node1 + +``` + +What you see above is a list of all containers that you have run. + +Notice that the `STATUS` column shows the current condition of the container: running, or as shown in the example, when the container was exited. + +### :material-docker: stop + +The `stop` command is used for containers that are actively running, either as a foreground process or as a detached background one. 
+ +You can find a running container using the `docker ps` command. + +### :material-docker: rm + +You can remove individual stopped containers by using the `rm` command. Use the `ps` command to see all your stopped contiainers: + +``` +@user ➜ /workspaces $ docker ps -a +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +03542eaac9dc hello-world "/hello" About a minute ago Exited (0) About a minute ago unruffled_nobel +``` + +Use the first few unique alphanumerics in the CONTAINER ID to remove the stopped container: + +``` +@user ➜ /workspaces (mkdocs ✗) $ docker rm 0354 +0354 +``` + +Check to see that the container is gone using `ps -a` a second time (`-a` is shorthand for `--all`; the full command is `docker ps -a` or `docker ps --all`). + +### :material-docker: rmi + +The `rmi` command is similar to `rm` but it will remove the cached images. Used in combination with `docker images` or `docker system df` you can clean up a full cache + +``` +docker rmi +``` + +``` +@user ➜ /workspaces/ (mkdocs ✗) $ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +opendronemap/webodm_webapp latest e075d13aaf35 21 hours ago 1.62GB +redis latest a10f849e1540 5 days ago 117MB +opendronemap/nodeodm latest b4c50165f838 6 days ago 1.77GB +hello-world latest feb5d9fea6a5 7 months ago 13.3kB +opendronemap/webodm_db latest e40c0f274bba 8 months ago 695MB +@user ➜ /workspaces (mkdocs ✗) $ docker rmi hello-world +Untagged: hello-world:latest +Untagged: hello-world@sha256:10d7d58d5ebd2a652f4d93fdd86da8f265f5318c6a73cc5b6a9798ff6d2b2e67 +Deleted: sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412 +Deleted: sha256:e07ee1baac5fae6a26f30cabfe54a36d3402f96afda318fe0a96cec4ca393359 +@user ➜ /workspaces (mkdocs ✗) $ +``` + +### :material-docker: system + +The `system` command can be used to view information about containers on your cache, you can view your total disk usage, view events or info. + +You can also use it to `prune` unused data and image layers. 
+ +To remove all cached layers, images, and data you can use the `-af` flag for `all` and `force` + +``` +docker system prune -af +``` + +### :material-docker: tag + +By default an image will recieve the tag `latest` when it is not specified during the `docker build` + +Image names and tags can be created or changed using the `docker tag` command. + +``` +docker tag imagename:oldtag imagename:newtag +``` + +You can also change the registry name used in the tag: + +``` +docker tag docker.io/username/imagename:oldtag harbor.cyverse.org/project/imagename:newtag +``` + +The cached image laters will not change their `sha256` and both image tags will still be present after the new tag name is generated. + +### :material-docker: push + +By default `docker push` will upload your local container image to the [Docker Hub](){target=_blank} + +We will cover `push` in more detail at the end of Day 2, but the essential functionality is the same as pull. + +Also, make sure that your container has the appropriate [tag](./intro.md#tag) + +First, make sure to log into the Docker Hub, this will allow you to download private limages, to upload private/public images: + +``` +docker login +``` + +Alternately, you can [link GitHub / GitLab accounts](https://hub.docker.com/settings/linked-accounts){target=_blank} to the Docker Hub. + +To push the image to the Docker Hub: + +``` +docker push username/imagename:tag +``` + +or + +``` +docker push docker.io/username/imagename:tag +``` +or, to a private registry, here we push to CyVerse private `harbor.cyverse.org` registry which uses "project" sub folders: + +``` +docker push harbor.cyverse.org/project/imagename:newtag +``` + +--- + +## Interactive Commands with Containers + +Lets try another command, this time to access the container as a shell: + +``` +$ docker run alpine:latest sh +``` + +Wait, nothing happened, right? + +Is that a bug? + +Well, no. 
+ +The container will exit after running any scripted commands such as `sh`, unless they are run in an "interactive" terminal (TTY) - so for this example to not exit, you need to add the `-i` for interactive and `-t` for TTY. You can run them both in a single flag as `-it`, which is the more common way of adding the flag: + +``` +$ docker run -it alpine:latest sh +``` + +The prompt should change to something more like `/ # `. + +You are now running a shell inside the container! + +Try out a few commands like `ls -l`, `uname -a` and others. + +Exit out of the container by giving the `exit` command. + +``` +/ # exit +``` + +??? Warning "Making sure you've exited the container" + + If you type `exit` your **container** will exit and is no longer active. To check that, try the following: + + ``` + $ docker ps --latest + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + de4bbc3eeaec alpine "/bin/sh" 3 minutes ago Exited (0) About a minute ago pensive_leavitt + ``` + + If you want to keep the container active, then you can use keys `ctrl +p` `ctrl +q`. To make sure that it is not exited run the same `docker ps --latest` command again: + + ``` + $ docker ps --latest + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 0db38ea51a48 alpine "sh" 3 minutes ago Up 3 minutes elastic_lewin + ``` + + Now if you want to get back into that container, then you can type `docker attach `. This way you can save your container: + + ``` + $ docker attach 0db38ea51a48 + ``` + +--- + +## :material-home: House Keeping and :material-broom: Cleaning Up Exited Containers + +### Managing Docker Images + +In the previous example, you pulled the `alpine` image from the registry and asked the Docker client to run a container based on that image. To see the list of images that are available locally on your system, run the `docker images` command. 
+ +``` +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +ubuntu bionic 47b19964fb50 4 weeks ago 88.1MB +alpine latest caf27325b298 4 weeks ago 5.53MB +hello-world latest fce289e99eb9 2 months ago 1.84kB +``` + +Above is a list of images that I've pulled from the registry. You will have a different list of images on your machine. The **TAG** refers to a particular snapshot of the image and the **ID** is the corresponding unique identifier for that image. + +For simplicity, you can think of an image akin to a Git repository - images can be committed with changes and have multiple versions. When you do not provide a specific version number, the client defaults to latest. + +### Clutter and Cache + +Docker images are **cached** on your machine in the location where Docker was installed. These image files are not visible in the same directory where you might have used `docker pull `. + +Some Docker images can be large. Especially data science images with many scientific programming libraries and packages pre-installed. + +Pulling many images from the Docker Registries may fill up your hard disk! To inspect your system and disk use: + +``` +$ docker system info +$ docker system df +``` + +To find out how many images are on your machine, type: + +``` +$ docker images +``` + +To remove images that you no longer need, type: + +``` +$ docker system prune +``` + +This is where it becomes important to differentiate between *images*, *containers*, and *volumes* (which we'll get to more in a bit). You can take care of all of the dangling images and containers on your system. + +Note, that `prune` will not remove your cached *images* + +``` +$ docker system prune + WARNING! This will remove: + - all stopped containers + - all networks not used by at least one container + - all dangling images + - all dangling build cache + +Are you sure you want to continue? 
[y/N] +``` + +If you added the `-af` flag it will remove "all" `-a` dangling images, empty containers, AND ALL CACHED IMAGES with "force" `-f`. + +--- + +## Managing Data in Docker + +It is possible to store data within the writable layer of a container, but there are some limitations: + +!!! warning "" + - **The data doesn’t persist when that container is no longer running**, and it can be difficult to get the data out of the container if another process needs it. + - **A container’s writable layer is tightly coupled to the host machine where the container is running**. You can’t easily move the data somewhere else. + - **Its better to put your data into the container *AFTER* it is built** - this keeps the container size smaller and easier to move across networks. + +Docker offers three different ways to mount data into a container from the Docker host: **Volumes**, **tmpfs mounts** and **bind mounts**. Here, we will only be exploring *Volumes*. + +
+ ![vol](https://docs.docker.com/storage/images/types-of-mounts-volume.png) +
The various methods for accessing data using containers. tmpfs mounts store data directly in memory, bind mounts and volumes use the host's file system. Volumes are flexible and only attach a specific directory to the container, whilst bind mounts require the user to share the full path to a file in order to allow the container to access it. Taken from the official Docker documentation on [data management with docker](https://docs.docker.com/storage/volumes/).
+
+ +!!! info "Why Volumes?" + + Volumes are often a better choice than persisting data in a container’s writable layer, because using a volume does not increase the size of containers using it, and the volume’s contents exist outside the lifecycle of a given container. Some of the advantages of volumes include: + + - Volumes are easier to back up or migrate. + - You can manage volumes using Docker CLI commands or the Docker API. + - Volumes work on both UNIX and Windows containers. + - Volumes can be more safely shared among multiple containers. + - A new volume’s contents can be pre-populated by a container. + +The `-v` flag is used for mounting volumes: + +`-v` or `--volume`: Consists of three fields, separated by colon characters (:). + +The fields must be in the correct order, and the meaning of each field is not immediately obvious. + +Required: + +- The **first** field is the path on your **local machine** that where the data are. +- The **second** field is the path where the file or directory are **mounted in the container**. + +Optional: + +- The third field is optional, and is a comma-separated list of options, such as `ro` (read only). + +The synthax looks like the following: +``` +-v /home/username/your_data_folder:/container_folder +``` + +This is what a full docker command with a mounted volume looks like: +``` +$ docker run -v /home/$USER/read_cleanup:/work alpine:latest ls -l /work +``` + +So what if we wanted to work interactively inside the container? + +``` +$ docker run -it -v /home/$USER/read_cleanup:/work alpine:latest sh +``` + +Once you're in the container, you will see that the `/work` directory is mounted in the working directory. + +Any data that you add to that folder outside the container will appear **INSIDE** the container. And any work you do inside the container saved in that folder will be saved OUTSIDE the container as well. 
+ +--- + +## Working with Interactive Containers + +Let's go ahead and run some Integrated Development Environment images from "trusted" organizations on the Docker Hub Registry. + +### :material-language-python: Jupyter Lab + +In this section, let's find a Docker image which can run a Jupyter Notebook + +Search for official images on Docker Hub which contain the string 'jupyter' + +``` +$ docker search jupyter +``` + +It should return something like: + +``` +NAME DESCRIPTION STARS OFFICIAL AUTOMATED +jupyter/datascience-notebook Jupyter Notebook Data Science Stack from htt… 912 +jupyter/all-spark-notebook Jupyter Notebook Python, Scala, R, Spark, Me… 374 +jupyter/scipy-notebook Jupyter Notebook Scientific Python Stack fro… 337 +jupyterhub/jupyterhub JupyterHub: multi-user Jupyter notebook serv… 307 [OK] +jupyter/tensorflow-notebook Jupyter Notebook Scientific Python Stack w/ … 298 +jupyter/pyspark-notebook Jupyter Notebook Python, Spark, Mesos Stack … 224 +jupyter/base-notebook Small base image for Jupyter Notebook stacks… 168 +jupyter/minimal-notebook Minimal Jupyter Notebook Stack from https://… 150 +jupyter/r-notebook Jupyter Notebook R Stack from https://github… 44 +jupyterhub/singleuser single-user docker images for use with Jupyt… 43 [OK] +jupyter/nbviewer Jupyter Notebook Viewer 27 [OK] +``` + +??? Warning "Untrusted community images" + + An important thing to note: None of these Jupyter or RStudio images are 'official' Docker images, meaning they could be trojans for spyware, malware, or other nasty warez. + +--- + +## Understanding PORTS + +When we want to run a container that runs on the open internet, we need to add a [TCP or UDP port number](https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers){target=_blank} from which we can access the application in a browser using the machine's IP (Internet Protocol) address or DNS (Domain Name Service) location. 
+ +To do this, we need to access the container over a separate port address on the machine we're working on. + +Docker uses the flag `--port` or `-p` for short followed by two sets of port numbers. + +??? Note "Exposing Ports" + + Docker can in fact expose all ports to a container using the capital `-P` flag + + For security purposes, it is generally NEVER a good idea to expose all ports. + +Typically these numbers can be the same, but in some cases your machine may already be running another program (or container) on that open port. + +The port has two sides `left:right` separated by a colon. The left side port number is the INTERNAL port that the container software thinks its using. The right side number is the EXTERNAL port that you can access on your computer (or virtual machine). + +Here are some examples to run basic RStudio and Jupyter Lab: + +``` +$ docker run --rm -p 8787:8787 -e PASSWORD=cc2022 rocker/rstudio +``` + +**note: on CodeSpaces, the reverse proxy for the DNS requires you to turn off authentication** + +``` +$ docker run --rm -p 8787:8787 -e DISABLE_AUTH=true rocker/rstudio +``` + +``` +$ docker run --rm -p 8888:8888 jupyter/base-notebook +``` + +??? Note "Preempting stale containers from your cache" + + We've added the `--rm` flag, which means the container will automatically removed from the cache when the container is exited. + + When you start an IDE in a terminal, the terminal connection must stay active to keep the container alive. + +### Detaching your container while it is running + +If we want to keep our window in the foreground we can use the `-d` - the *detached* flag will run the container as a background process, rather than in the foreground. + +When you run a container with this flag, it will start, run, telling you the container ID: + +``` +$ docker run --rm -d -p 8888:8888 jupyter/base-notebook +``` +Note, that your terminal is still active and you can use it to launch more containers. 
+ +To view the running container, use the `docker ps` command. + +--- + +## Docker Commands + +Here is a compiled list of fundamental Docker Commands: + +| Command | Usage | Example | +|---------|-------|---------| +| `pull` | Downloads an image from Docker Hub | `docker pull hello-world:latest` | +| `run` | runs a container with entrypoint | `docker run -it user/image:tag` | +| `build` | Builds a docker image from a Dockerfile in current working directory | `docker build -t user/image:tag .` | +| `images` | List all images on the local machine | `docker images list` | +| `tag` | Adds a different tag name to an image | `docker tag hello-world:latest hello-world:new-tag-name` | +| `login` | Authenticate to the Docker Hub (requires username and password) | `docker login` | +| `push` | Upload your new image to the Docker Hub | `docker push user/image:tag` | +| `inspect` | Provide detailed information on constructs controlled by Docker | `docker inspect containerID` | +| `ps` | List all containers on your system | `docker ps -a` | +| `rm` | Delete a stopped or running container |`docker rm -f ` | +| `rmi` | Delete an *image* from your cache | `docker rmi hello-world:latest` | +| `stop` | Stop a running container | `docker stop alpine:latest` | +| `system` | View system details, remove old images and containers with `prune` |`docker system prune` | +| `push` | Uploads an image to the Docker Hub (or other private registry) | `docker push username/image:tag` | + +--- + +## Self Assessment + +??? Question "A Docker container with the tagname `latest` ensures old code and data will work on a new computer setup?" + + !!! Success "Answer" + + Never use the `latest` tag for a publication or archival. + + The `latest` version is always being updated and should be considered "cutting edge". + + `latest` is the default tag name of all Docker images + + `latest` versions MAY have backward compatibility with older code and data, but this is not always a given + +??? 
Question "When are containers the right solution?" + + !!! Success + + Containers are valuable when you need to run an analyses on a remote platform, and you need it to work every time. + + !!! Failure + + You need to do some simple text file editing + + You need to run a web service + + + +??? Question "True or False: Docker containers allow for reproducibility across all computing platforms" + + !!! Success "False" + + While Docker allows you to quickly run software from other people, it may not work across every platform. + + There are different CPU architectures (`arm`, `amd64`, `x64, `x86`) deployed across cloud, computer workstations, laptops, and cellular phones. + + Docker containers and their software can be cross-compiled across architectures, but this must be done by the creators. + +??? Question "When is it advisable to not trust a Docker image?" + + !!! Success "When you cannot view its Dockerfile" + + Featured and Verified Docker images can be trusted, in general. + + User generated images should not be trusted unless you can view their Dockerfile, or build logs to determine what is actually in the container you are attempting to run. \ No newline at end of file diff --git a/07_reproducibility_ii/index.html b/07_reproducibility_ii/index.html new file mode 100644 index 000000000..6d2f96639 --- /dev/null +++ b/07_reproducibility_ii/index.html @@ -0,0 +1,2017 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 7. Reproducibility II: Replicability - CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Reproducibility II: Containers

+
+

Learning Objectives

+

After this lesson, you should be able to:

+
    +
  • Explain what containers are used for in reproducible research contexts
  • +
  • Search for and run a Docker container locally or on a remote system
  • +
  • Understand how version control and data can be used inside a container
  • +
+
+

Containers

+

What are containers?

+

A container is a standard unit of software that packages up code and all its dependencies so the application runs quickly and reliably from one computing environment to another. Container images are a lightweight, standalone, executable package of software that includes everything needed to run an application: code, runtime, system tools, system libraries and settings.

+

The most common container software is Docker, which is a platform for developers and sysadmins to develop, deploy, and run applications with containers. Apptainer (formerly Singularity) is another popular container engine, which allows you to deploy containers on HPC clusters.

+
+Container Advantages +

Advantages include efficient resource utilization, scalability, and portability. They separate applications from the underlying infrastructure, ensuring consistent behavior across different environments and enabling better resource management and security by isolating applications from the host system and other containers.

+
+

Containers are similar to virtual machines (VMs), but are smaller and easier to share. A big distinction between Containers and VMs is what is within each environment: VMs require the OS to be present within the image, whilst containers rely on the host OS and the container engine (i.e., Docker Engine).

+
+

containerexp +

Difference between Virtual Machines and Containers. Containers are a lot more portable as these do not require an OS to be bundled with the software. Figure source: Microsoft Cloudblogs.

+
+

Containers can Cross Operating Systems but may have problems between CPU architectures (e.g., x86 vs ARM vs AMD64).

+

Containers for Reproducible Science

+

Use Cases for Containers

+

hosting user created containers on DockerHub, and providing a cross-OS user-friendly toolset for container creation and deployment.

+

RStudio has a number of available Docker containers, each for different use cases and maintained by the Rocker Project. Project Jupyter also has a number of maintained docker images accessible through DockerHub.

+
+Alternatively: The Carpentries Introductory Container workshop +

The Carpentries have an incubator workshop on Docker Containers.

+

Extra: Containers in Research Workflows (section 9 from the lesson linked above).

+
+
+

Introduction to Docker

+
+

gitlfs

+
+

Prerequisites

+

In order to complete these exercises we STRONGLY recommend that you set up a personal GitHub and DockerHub account (account creation for both services is free).

+

There are no specific skills needed for this tutorial beyond elementary command line ability and using a text editor.

+

We are going to be using GitHub CodeSpaces for the hands on portion of the workshop, which features VS Code as a fully enabled development environment with Docker already installed.

+

CodeSpaces is a featured product from GitHub and requires a paid subscription or Academic account for access. Your account will temporarily be integrated with the course GitHub Organization for the next steps in the workshop.

+

Our instructions on starting a new CodeSpace are here.

+
+Installing Docker on your personal computer +

We are going to be using virtual machines on the cloud for this course, and we will explain why this is a good thing, but there may be a time when you want to run Docker on your own computer.

+

Installing Docker takes a little time but it is reasonably straight forward and it is a one-time setup.

+

Installation instructions from Docker Official Docs for common OS and chip architectures:

+ +
+
+Never used a terminal before? +

Before venturing much further, you should review the Software Carpentry lessons on "The Unix Shell" and "Version Control with Git" -- these are great introductory lessons related to the skills we're teaching here.

+

You've given up on ever using a terminal? No problem, Docker can be used from graphic interfaces, like Docker Desktop, or platforms like Portainer. We suggest you read through their documentation on how to use Docker.

+
+

Fundamental Docker Commands

+

Docker commands in the terminal use the prefix docker.

+
+

For every command listed, the correct execution of the commands through the command line is by using docker in front of the command: for example docker help or docker search. Thus, every = docker.

+
+

help

+

Like many other command line applications the most helpful flag is the help command which can be used with the Management Commands:

+
$ docker 
+$ docker --help
+
+ +

We talk about the concept of Docker Registries in the next section, but you can search the public list of registries by using the docker search command to find public containers on the Official Docker Hub Registry:

+
$ docker search  
+
+

pull

+

Go to the Docker Hub and type hello-world in the search bar at the top of the page.

+

Click on the 'tag' tab to see all the available 'hello-world' images.

+

Click the 'copy' icon at the right to copy the docker pull command, or type it into your terminal:

+
$ docker pull hello-world
+
+
+

Note

+
If you leave off the `:` and the tag name, it will by default pull the `latest` image
+
+
+
$ docker pull hello-world
+Using default tag: latest
+latest: Pulling from library/hello-world
+2db29710123e: Pull complete 
+Digest: sha256:bfea6278a0a267fad2634554f4f0c6f31981eea41c553fdf5a83e95a41d40c38
+Status: Downloaded newer image for hello-world:latest
+docker.io/library/hello-world:latest
+
+

Now try to list the files in your current working directory:

+
$ ls -l
+
+
+Where is the image you just pulled? +

Docker saves container images to the Docker directory (where Docker is installed).

+

You won't ever see them in your working directory.

+

Use 'docker images' to see all the images on your computer:

+
$ docker images
+
+
+

run

+

The single most common command that you'll use with Docker is docker run (see official help manual for more details).

+

docker run starts a container and executes the default "entrypoint", or any other "command" that follows run and any optional flags.

+
+What is an entrypoint? +

An entrypoint is the initial command(s) executed upon starting the Docker container. It is listed in the Dockerfile as ENTRYPOINT and can take 2 forms: as commands followed by parameters (ENTRYPOINT command param1 param2) or as an executable (ENTRYPOINT [“executable”, “param1”, “param2”])

+
+
$ docker run hello-world:latest
+
+

In the demo above, you used the docker pull command to download the hello-world:latest image.

+

What about if you run a container that you haven't downloaded?

+
$ docker run alpine:latest ls -l
+
+

When you executed the command docker run alpine:latest, Docker first looked for the cached image locally, but did not find it, it then ran a docker pull behind the scenes to download the alpine:latest image and then execute your command.

+

When you ran docker run alpine:latest, you provided a command ls -l, so Docker started the command specified and you saw the listing of the Alpine file system (not your host system, this was inside the container!).

+

images

+

You can now use the docker images command to see a list of all the cached images on your system:

+
$ docker images 
+REPOSITORY              TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+alpine                  latest              c51f86c28340        4 weeks ago         1.109 MB
+hello-world             latest              690ed74de00f        5 months ago        960 B
+
+
+Inspecting your containers +

To find out more about a Docker image, run docker inspect hello-world:latest

+
+

ps

+

Now it's time to see the docker ps command which shows you all containers that are currently running on your machine.

+
docker ps
+
+

Since no containers are running, you see a blank line.

+
$ docker ps
+CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+
+

Let's try a more useful variant: docker ps --all

+
$ docker ps --all
+CONTAINER ID   IMAGE                                            COMMAND                  CREATED          STATUS                      PORTS     NAMES
+a5eab9243a15   hello-world                                      "/hello"                 5 seconds ago    Exited (0) 3 seconds ago              loving_mcnulty
+3bb4e26d2e0c   alpine:latest                                    "/bin/sh"                17 seconds ago   Exited (0) 16 seconds ago             objective_meninsky
+192ffdf0cbae   opensearchproject/opensearch-dashboards:latest   "./opensearch-dashbo…"   3 days ago       Exited (0) 3 days ago                 opensearch-dashboards
+a10d47d3b6de   opensearchproject/opensearch:latest              "./opensearch-docker…"   3 days ago       Exited (0) 3 days ago                 opensearch-node1
+
+

What you see above is a list of all containers that you have run.

+

Notice that the STATUS column shows the current condition of the container: running, or as shown in the example, when the container was exited.

+

stop

+

The stop command is used for containers that are actively running, either as a foreground process or as a detached background one.

+

You can find a running container using the docker ps command.

+

rm

+

You can remove individual stopped containers by using the rm command. Use the ps command to see all your stopped containers:

+
@user ➜ /workspaces $ docker ps -a
+CONTAINER ID   IMAGE                        COMMAND                  CREATED              STATUS                          PORTS     NAMES
+03542eaac9dc   hello-world                  "/hello"                 About a minute ago   Exited (0) About a minute ago             unruffled_nobel
+
+

Use the first few unique alphanumerics in the CONTAINER ID to remove the stopped container:

+
@user ➜ /workspaces (mkdocs ✗) $ docker rm 0354
+0354
+
+

Check to see that the container is gone using ps -a a second time (-a is shorthand for --all; the full command is docker ps -a or docker ps --all).

+

rmi

+

The rmi command is similar to rm but it will remove the cached images. Used in combination with docker images or docker system df you can clean up a full cache

+
docker rmi
+
+
@user ➜ /workspaces/ (mkdocs ✗) $ docker images
+REPOSITORY                   TAG       IMAGE ID       CREATED        SIZE
+opendronemap/webodm_webapp   latest    e075d13aaf35   21 hours ago   1.62GB
+redis                        latest    a10f849e1540   5 days ago     117MB
+opendronemap/nodeodm         latest    b4c50165f838   6 days ago     1.77GB
+hello-world                  latest    feb5d9fea6a5   7 months ago   13.3kB
+opendronemap/webodm_db       latest    e40c0f274bba   8 months ago   695MB
+@user ➜ /workspaces (mkdocs ✗) $ docker rmi hello-world
+Untagged: hello-world:latest
+Untagged: hello-world@sha256:10d7d58d5ebd2a652f4d93fdd86da8f265f5318c6a73cc5b6a9798ff6d2b2e67
+Deleted: sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412
+Deleted: sha256:e07ee1baac5fae6a26f30cabfe54a36d3402f96afda318fe0a96cec4ca393359
+@user ➜ /workspaces (mkdocs ✗) $ 
+
+

system

+

The system command can be used to view information about containers on your cache, you can view your total disk usage, view events or info.

+

You can also use it to prune unused data and image layers.

+

To remove all cached layers, images, and data you can use the -af flag for all and force

+
docker system prune -af
+
+

tag

+

By default an image will receive the tag latest when it is not specified during the docker build.

+

Image names and tags can be created or changed using the docker tag command.

+
docker tag imagename:oldtag imagename:newtag
+
+

You can also change the registry name used in the tag:

+
docker tag docker.io/username/imagename:oldtag harbor.cyverse.org/project/imagename:newtag
+
+

The cached image layers will not change their sha256 and both image tags will still be present after the new tag name is generated.

+

push

+

By default docker push will upload your local container image to the Docker Hub

+

We will cover push in more detail at the end of Day 2, but the essential functionality is the same as pull.

+

Also, make sure that your container has the appropriate tag

+

First, make sure to log into the Docker Hub; this will allow you to download private images and to upload private/public images:

+
docker login
+
+

Alternately, you can link GitHub / GitLab accounts to the Docker Hub.

+

To push the image to the Docker Hub:

+
docker push username/imagename:tag 
+
+

or

+

docker push docker.io/username/imagename:tag
+
+or, to a private registry, here we push to CyVerse private harbor.cyverse.org registry which uses "project" sub folders:

+
docker push harbor.cyverse.org/project/imagename:newtag 
+
+
+

Interactive Commands with Containers

+

Lets try another command, this time to access the container as a shell:

+
$ docker run alpine:latest sh
+
+

Wait, nothing happened, right?

+

Is that a bug?

+

Well, no.

+

The container will exit after running any scripted commands such as sh, unless they are run in an "interactive" terminal (TTY) - so for this example to not exit, you need to add the -i for interactive and -t for TTY. You can run them both in a single flag as -it, which is the more common way of adding the flag:

+
$ docker run -it alpine:latest sh
+
+

The prompt should change to something more like / #.

+

You are now running a shell inside the container!

+

Try out a few commands like ls -l, uname -a and others.

+

Exit out of the container by giving the exit command.

+
/ # exit
+
+
+Making sure you've exited the container +

If you type exit your container will exit and is no longer active. To check that, try the following:

+
$ docker ps --latest
+CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS                          PORTS                    NAMES
+de4bbc3eeaec        alpine                "/bin/sh"                3 minutes ago       Exited (0) About a minute ago                            pensive_leavitt
+
+

If you want to keep the container active, then you can use the keys Ctrl+p, Ctrl+q. To make sure that it is not exited run the same docker ps --latest command again:

+
$ docker ps --latest
+CONTAINER ID        IMAGE                 COMMAND                  CREATED             STATUS                         PORTS                    NAMES
+0db38ea51a48        alpine                "sh"                     3 minutes ago       Up 3 minutes                                            elastic_lewin
+
+

Now if you want to get back into that container, then you can type docker attach <container id>. This way you can save your container:

+
$ docker attach 0db38ea51a48
+
+
+
+

House Keeping and Cleaning Up Exited Containers

+

Managing Docker Images

+

In the previous example, you pulled the alpine image from the registry and asked the Docker client to run a container based on that image. To see the list of images that are available locally on your system, run the docker images command.

+
$ docker images
+REPOSITORY                 TAG                 IMAGE ID            CREATED             SIZE
+ubuntu                     bionic              47b19964fb50        4 weeks ago         88.1MB
+alpine                     latest              caf27325b298        4 weeks ago         5.53MB
+hello-world                latest              fce289e99eb9        2 months ago        1.84kB
+
+

Above is a list of images that I've pulled from the registry. You will have a different list of images on your machine. The TAG refers to a particular snapshot of the image and the ID is the corresponding unique identifier for that image.

+

For simplicity, you can think of an image akin to a Git repository - images can be committed with changes and have multiple versions. When you do not provide a specific version number, the client defaults to latest.

+

Clutter and Cache

+

Docker images are cached on your machine in the location where Docker was installed. These image files are not visible in the same directory where you might have used docker pull <imagename>.

+

Some Docker images can be large. Especially data science images with many scientific programming libraries and packages pre-installed.

+

Pulling many images from the Docker Registries may fill up your hard disk! To inspect your system and disk use:

+
$ docker system info
+$ docker system df
+
+

To find out how many images are on your machine, type:

+
$ docker images
+
+

To remove images that you no longer need, type:

+
$ docker system prune
+
+

This is where it becomes important to differentiate between images, containers, and volumes (which we'll get to more in a bit). You can take care of all of the dangling images and containers on your system.

+

Note, that prune will not remove your cached images

+
$ docker system prune
+    WARNING! This will remove:
+    - all stopped containers
+    - all networks not used by at least one container
+    - all dangling images
+    - all dangling build cache
+
+Are you sure you want to continue? [y/N]
+
+

If you added the -af flag it will remove "all" -a dangling images, empty containers, AND ALL CACHED IMAGES with "force" -f.

+
+

Managing Data in Docker

+

It is possible to store data within the writable layer of a container, but there are some limitations:

+
+
    +
  • The data doesn’t persist when that container is no longer running, and it can be difficult to get the data out of the container if another process needs it.
  • +
  • A container’s writable layer is tightly coupled to the host machine where the container is running. You can’t easily move the data somewhere else.
  • +
  • It's better to put your data into the container AFTER it is built - this keeps the container size smaller and easier to move across networks.
  • +
+
+

Docker offers three different ways to mount data into a container from the Docker host: Volumes, tmpfs mounts and bind mounts. Here, we will only be exploring Volumes.

+
+

vol +

The various methods for accessing data using containers. tmpfs mounts store data directly in memory, bind mounts and volumes use the host's file system. Volumes are flexible and only attach a specific directory to the container, whilst bind mounts require the user to share the full path to a file in order to allow the container to access it. Taken from the official Docker documentation on data management with docker.

+
+
+

Why Volumes?

+

Volumes are often a better choice than persisting data in a container’s writable layer, because using a volume does not increase the size of containers using it, and the volume’s contents exist outside the lifecycle of a given container. Some of the advantages of volumes include:

+
    +
  • Volumes are easier to back up or migrate.
  • +
  • You can manage volumes using Docker CLI commands or the Docker API.
  • +
  • Volumes work on both UNIX and Windows containers.
  • +
  • Volumes can be more safely shared among multiple containers.
  • +
  • A new volume’s contents can be pre-populated by a container.
  • +
+
+

The -v flag is used for mounting volumes:

+

-v or --volume: Consists of three fields, separated by colon characters (:).

+

The fields must be in the correct order, and the meaning of each field is not immediately obvious.

+

Required:

+
    +
  • The first field is the path on your local machine where the data are located.
  • +
  • The second field is the path where the file or directory are mounted in the container.
  • +
+

Optional:

+
    +
  • The third field is optional, and is a comma-separated list of options, such as ro (read only).
  • +
+

The syntax looks like the following: +

-v /home/username/your_data_folder:/container_folder
+

+

This is what a full docker command with a mounted volume looks like: +

$ docker run -v /home/$USER/read_cleanup:/work alpine:latest ls -l /work
+

+

So what if we wanted to work interactively inside the container?

+
$ docker run -it -v /home/$USER/read_cleanup:/work alpine:latest sh
+
+

Once you're in the container, you will see that the /work directory is mounted in the working directory.

+

Any data that you add to that folder outside the container will appear INSIDE the container. And any work you do inside the container saved in that folder will be saved OUTSIDE the container as well.

+
+

Working with Interactive Containers

+

Let's go ahead and run some Integrated Development Environment images from "trusted" organizations on the Docker Hub Registry.

+

Jupyter Lab

+

In this section, let's find a Docker image which can run a Jupyter Notebook

+

Search for official images on Docker Hub which contain the string 'jupyter'

+
$ docker search jupyter
+
+

It should return something like:

+
NAME                                   DESCRIPTION                                     STARS     OFFICIAL   AUTOMATED
+jupyter/datascience-notebook           Jupyter Notebook Data Science Stack from htt…   912                  
+jupyter/all-spark-notebook             Jupyter Notebook Python, Scala, R, Spark, Me…   374                  
+jupyter/scipy-notebook                 Jupyter Notebook Scientific Python Stack fro…   337                  
+jupyterhub/jupyterhub                  JupyterHub: multi-user Jupyter notebook serv…   307                  [OK]
+jupyter/tensorflow-notebook            Jupyter Notebook Scientific Python Stack w/ …   298                  
+jupyter/pyspark-notebook               Jupyter Notebook Python, Spark, Mesos Stack …   224                  
+jupyter/base-notebook                  Small base image for Jupyter Notebook stacks…   168                  
+jupyter/minimal-notebook               Minimal Jupyter Notebook Stack from https://…   150                  
+jupyter/r-notebook                     Jupyter Notebook R Stack from https://github…   44                   
+jupyterhub/singleuser                  single-user docker images for use with Jupyt…   43                   [OK]
+jupyter/nbviewer                       Jupyter Notebook Viewer                         27                   [OK]
+
+
+Untrusted community images +

An important thing to note: None of these Jupyter or RStudio images are 'official' Docker images, meaning they could be trojans for spyware, malware, or other nasty warez.

+
+
+

Understanding PORTS

+

When we want to run a container that runs on the open internet, we need to add a TCP or UDP port number from which we can access the application in a browser using the machine's IP (Internet Protocol) address or DNS (Domain Name Service) location.

+

To do this, we need to access the container over a separate port address on the machine we're working on.

+

Docker uses the flag --publish, or -p for short, followed by two sets of port numbers.

+
+Exposing Ports +

Docker can in fact expose all ports to a container using the capital -P flag

+

For security purposes, it is generally NEVER a good idea to expose all ports.

+
+

Typically these numbers can be the same, but in some cases your machine may already be running another program (or container) on that open port.

+

The port has two sides left:right separated by a colon. The left side number is the EXTERNAL port that you can access on your computer (or virtual machine). The right side number is the INTERNAL port that the container software thinks it is using.

+

Here are some examples to run basic RStudio and Jupyter Lab:

+
$ docker run --rm -p 8787:8787 -e PASSWORD=cc2022 rocker/rstudio
+
+

note: on CodeSpaces, the reverse proxy for the DNS requires you to turn off authentication

+
$ docker run --rm -p 8787:8787 -e DISABLE_AUTH=true rocker/rstudio
+
+
$ docker run --rm -p 8888:8888 jupyter/base-notebook
+
+
+Preempting stale containers from your cache +

We've added the --rm flag, which means the container will automatically be removed from the cache when the container exits.

+

When you start an IDE in a terminal, the terminal connection must stay active to keep the container alive.

+
+

Detaching your container while it is running

+

If we want to keep our window in the foreground we can use the -d - the detached flag will run the container as a background process, rather than in the foreground.

+

When you run a container with this flag, it will start, run in the background, and print the container ID:

+

$ docker run --rm -d -p 8888:8888 jupyter/base-notebook
+
+Note, that your terminal is still active and you can use it to launch more containers.

+

To view the running container, use the docker ps command.

+
+

Docker Commands

+

Here is a compiled list of fundamental Docker Commands:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CommandUsageExample
pullDownloads an image from Docker Hubdocker pull hello-world:latest
runruns a container with entrypointdocker run -it user/image:tag
buildBuilds a docker image from a Dockerfile in current working directorydocker build -t user/image:tag .
imagesList all images on the local machinedocker images
tagAdds a different tag name to an imagedocker tag hello-world:latest hello-world:new-tag-name
loginAuthenticate to the Docker Hub (requires username and password)docker login
pushUpload your new image to the Docker Hubdocker push user/image:tag
inspectProvide detailed information on constructs controlled by Dockerdocker inspect containerID
psList all containers on your systemdocker ps -a
rmDelete a stopped or running containerdocker rm -f <container ID>
rmiDelete an image from your cachedocker rmi hello-world:latest
stopStop a running containerdocker stop <container ID>
systemView system details, remove old images and containers with prunedocker system prune
pushUploads an image to the Docker Hub (or other private registry)docker push username/image:tag
+
+

Self Assessment

+
+A Docker container with the tagname latest ensures old code and data will work on a new computer setup? +
+

Answer

+

Never use the latest tag for a publication or archival.

+

The latest version is always being updated and should be considered "cutting edge".

+

latest is the default tag name of all Docker images

+

latest versions MAY have backward compatibility with older code and data, but this is not always a given

+
+
+
+When are containers the right solution? +
+

Success

+

Containers are valuable when you need to run an analysis on a remote platform, and you need it to work every time.

+
+
+

Failure

+

You need to do some simple text file editing

+

You need to run a web service

+
+
+
+True or False: Docker containers allow for reproducibility across all computing platforms +
+

False

+

While Docker allows you to quickly run software from other people, it may not work across every platform.

+

There are different CPU architectures (arm, amd64, x64, x86) deployed across cloud, computer workstations, laptops, and cellular phones.

+

Docker containers and their software can be cross-compiled across architectures, but this must be done by the creators.

+
+
+
+When is it advisable to not trust a Docker image? +
+

When you cannot view its Dockerfile

+

Featured and Verified Docker images can be trusted, in general.

+

User generated images should not be trusted unless you can view their Dockerfile, or build logs to determine what is actually in the container you are attempting to run.

+
+
+ +
+
+ + + Last update: + 2023-10-18 + + +
+ + + + + + + + +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/404.html b/404.html new file mode 100644 index 000000000..0d5cf34db --- /dev/null +++ b/404.html @@ -0,0 +1,922 @@ + + + + + + + + + + + + + + + + + + + + + + + CyVerse Foundational Open Science Skills 2023 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 000000000..2ebddff2a --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +foss.cyverse.org diff --git a/assets/DataCommons_DrkBlue.png b/assets/DataCommons_DrkBlue.png new file mode 100644 index 000000000..43fd02552 Binary files /dev/null and b/assets/DataCommons_DrkBlue.png differ diff --git a/assets/DataCommons_DrkBlue0.png b/assets/DataCommons_DrkBlue0.png new file mode 100644 index 000000000..43fd02552 Binary files /dev/null and b/assets/DataCommons_DrkBlue0.png differ diff --git a/assets/JupyterShellPrompt.png b/assets/JupyterShellPrompt.png new file mode 100644 index 000000000..f2c69516d Binary files /dev/null and b/assets/JupyterShellPrompt.png differ diff --git a/assets/Layer_Cake_Updated.png b/assets/Layer_Cake_Updated.png new file mode 100644 index 000000000..9be70bf2d Binary files /dev/null and b/assets/Layer_Cake_Updated.png differ diff --git a/assets/Layer_Cake_Updated0.png b/assets/Layer_Cake_Updated0.png new file mode 100644 index 000000000..9be70bf2d Binary files /dev/null and b/assets/Layer_Cake_Updated0.png differ diff --git a/assets/Learningcenter_DkBlue.png b/assets/Learningcenter_DkBlue.png new file mode 100644 index 000000000..876d2da30 Binary files /dev/null and b/assets/Learningcenter_DkBlue.png differ diff --git a/assets/Learningcenter_DkBlue0.png b/assets/Learningcenter_DkBlue0.png new file mode 100644 index 000000000..876d2da30 Binary files /dev/null and b/assets/Learningcenter_DkBlue0.png differ diff --git a/assets/Powered-By-CyVerse-blue.png b/assets/Powered-By-CyVerse-blue.png new file mode 100644 index 000000000..e9f257eda Binary files /dev/null and b/assets/Powered-By-CyVerse-blue.png differ diff --git a/assets/PoweredbyCyverse_LogoSquare.png b/assets/PoweredbyCyverse_LogoSquare.png new file mode 100644 index 000000000..e4bdc73cd Binary files /dev/null and b/assets/PoweredbyCyverse_LogoSquare.png differ diff --git 
a/assets/PoweredbyCyverse_LogoSquare0.png b/assets/PoweredbyCyverse_LogoSquare0.png new file mode 100644 index 000000000..e4bdc73cd Binary files /dev/null and b/assets/PoweredbyCyverse_LogoSquare0.png differ diff --git a/assets/RO_crate.png b/assets/RO_crate.png new file mode 100644 index 000000000..b0e48fda7 Binary files /dev/null and b/assets/RO_crate.png differ diff --git a/assets/_mkdocstrings.css b/assets/_mkdocstrings.css new file mode 100644 index 000000000..e69de29bb diff --git a/assets/addtool_VICE_1.png b/assets/addtool_VICE_1.png new file mode 100644 index 000000000..f6b66750b Binary files /dev/null and b/assets/addtool_VICE_1.png differ diff --git a/assets/addtool_VICE_10.png b/assets/addtool_VICE_10.png new file mode 100644 index 000000000..f6b66750b Binary files /dev/null and b/assets/addtool_VICE_10.png differ diff --git a/assets/agave/agave.png b/assets/agave/agave.png new file mode 100644 index 000000000..40743beb5 Binary files /dev/null and b/assets/agave/agave.png differ diff --git a/assets/atmo-1.1.png b/assets/atmo-1.1.png new file mode 100644 index 000000000..dc9bfdc0d Binary files /dev/null and b/assets/atmo-1.1.png differ diff --git a/assets/atmo-1.png b/assets/atmo-1.png new file mode 100644 index 000000000..b164f4810 Binary files /dev/null and b/assets/atmo-1.png differ diff --git a/assets/atmo-10.1.png b/assets/atmo-10.1.png new file mode 100644 index 000000000..dc9bfdc0d Binary files /dev/null and b/assets/atmo-10.1.png differ diff --git a/assets/atmo-10.png b/assets/atmo-10.png new file mode 100644 index 000000000..b164f4810 Binary files /dev/null and b/assets/atmo-10.png differ diff --git a/assets/atmo-6.png b/assets/atmo-6.png new file mode 100644 index 000000000..97be7d35c Binary files /dev/null and b/assets/atmo-6.png differ diff --git a/assets/atmo-60.png b/assets/atmo-60.png new file mode 100644 index 000000000..97be7d35c Binary files /dev/null and b/assets/atmo-60.png differ diff --git a/assets/atmo-7.png b/assets/atmo-7.png new 
file mode 100644 index 000000000..a946626b6 Binary files /dev/null and b/assets/atmo-7.png differ diff --git a/assets/atmo-70.png b/assets/atmo-70.png new file mode 100644 index 000000000..a946626b6 Binary files /dev/null and b/assets/atmo-70.png differ diff --git a/assets/atmo-8.png b/assets/atmo-8.png new file mode 100644 index 000000000..63cacf470 Binary files /dev/null and b/assets/atmo-8.png differ diff --git a/assets/atmo-80.png b/assets/atmo-80.png new file mode 100644 index 000000000..63cacf470 Binary files /dev/null and b/assets/atmo-80.png differ diff --git a/assets/atmo-9.png b/assets/atmo-9.png new file mode 100644 index 000000000..fb5061fce Binary files /dev/null and b/assets/atmo-9.png differ diff --git a/assets/atmo-90.png b/assets/atmo-90.png new file mode 100644 index 000000000..fb5061fce Binary files /dev/null and b/assets/atmo-90.png differ diff --git a/assets/atmo_cp.png b/assets/atmo_cp.png new file mode 100644 index 000000000..9f782b7ff Binary files /dev/null and b/assets/atmo_cp.png differ diff --git a/assets/atmo_cp0.png b/assets/atmo_cp0.png new file mode 100644 index 000000000..9f782b7ff Binary files /dev/null and b/assets/atmo_cp0.png differ diff --git a/assets/atmo_cp00.png b/assets/atmo_cp00.png new file mode 100644 index 000000000..ce2d5924e Binary files /dev/null and b/assets/atmo_cp00.png differ diff --git a/assets/atmo_launch.png b/assets/atmo_launch.png new file mode 100644 index 000000000..1b8222762 Binary files /dev/null and b/assets/atmo_launch.png differ diff --git a/assets/atmo_launch0.png b/assets/atmo_launch0.png new file mode 100644 index 000000000..1b8222762 Binary files /dev/null and b/assets/atmo_launch0.png differ diff --git a/assets/atmo_launch00.png b/assets/atmo_launch00.png new file mode 100644 index 000000000..0e2204d7c Binary files /dev/null and b/assets/atmo_launch00.png differ diff --git a/assets/atmo_launch1.png b/assets/atmo_launch1.png new file mode 100644 index 000000000..d47ef1a6e Binary files /dev/null and 
b/assets/atmo_launch1.png differ diff --git a/assets/atmo_request.png b/assets/atmo_request.png new file mode 100644 index 000000000..c526d1113 Binary files /dev/null and b/assets/atmo_request.png differ diff --git a/assets/atmo_request0.png b/assets/atmo_request0.png new file mode 100644 index 000000000..c526d1113 Binary files /dev/null and b/assets/atmo_request0.png differ diff --git a/assets/atmo_resources.png b/assets/atmo_resources.png new file mode 100644 index 000000000..3298fec16 Binary files /dev/null and b/assets/atmo_resources.png differ diff --git a/assets/atmo_resources0.png b/assets/atmo_resources0.png new file mode 100644 index 000000000..3298fec16 Binary files /dev/null and b/assets/atmo_resources0.png differ diff --git a/assets/atmosphere/atmosphere-icon.png b/assets/atmosphere/atmosphere-icon.png new file mode 100644 index 000000000..1bd08de30 Binary files /dev/null and b/assets/atmosphere/atmosphere-icon.png differ diff --git a/assets/atmosphere/instance_status.png b/assets/atmosphere/instance_status.png new file mode 100644 index 000000000..4c6d17cf4 Binary files /dev/null and b/assets/atmosphere/instance_status.png differ diff --git a/assets/atmosphere/launch_window.png b/assets/atmosphere/launch_window.png new file mode 100644 index 000000000..0f4086ae1 Binary files /dev/null and b/assets/atmosphere/launch_window.png differ diff --git a/assets/atmosphere/need_more.png b/assets/atmosphere/need_more.png new file mode 100644 index 000000000..8e8bd3601 Binary files /dev/null and b/assets/atmosphere/need_more.png differ diff --git a/assets/atmosphere/project_icon.png b/assets/atmosphere/project_icon.png new file mode 100644 index 000000000..bc1955620 Binary files /dev/null and b/assets/atmosphere/project_icon.png differ diff --git a/assets/atmosphere/request_access.png b/assets/atmosphere/request_access.png new file mode 100644 index 000000000..c8fd3ecbc Binary files /dev/null and b/assets/atmosphere/request_access.png differ diff --git 
a/assets/atmosphere/vnc_desktop.png b/assets/atmosphere/vnc_desktop.png new file mode 100644 index 000000000..323ce2a0b Binary files /dev/null and b/assets/atmosphere/vnc_desktop.png differ diff --git a/assets/auto-1.png b/assets/auto-1.png new file mode 100644 index 000000000..9d6a9a8fe Binary files /dev/null and b/assets/auto-1.png differ diff --git a/assets/auto-10.png b/assets/auto-10.png new file mode 100644 index 000000000..9d6a9a8fe Binary files /dev/null and b/assets/auto-10.png differ diff --git a/assets/auto-2.png b/assets/auto-2.png new file mode 100644 index 000000000..332eca34b Binary files /dev/null and b/assets/auto-2.png differ diff --git a/assets/auto-20.png b/assets/auto-20.png new file mode 100644 index 000000000..332eca34b Binary files /dev/null and b/assets/auto-20.png differ diff --git a/assets/auto-3.png b/assets/auto-3.png new file mode 100644 index 000000000..581be0014 Binary files /dev/null and b/assets/auto-3.png differ diff --git a/assets/auto-30.png b/assets/auto-30.png new file mode 100644 index 000000000..581be0014 Binary files /dev/null and b/assets/auto-30.png differ diff --git a/assets/auto-4.png b/assets/auto-4.png new file mode 100644 index 000000000..fa9277b0c Binary files /dev/null and b/assets/auto-4.png differ diff --git a/assets/auto-40.png b/assets/auto-40.png new file mode 100644 index 000000000..fa9277b0c Binary files /dev/null and b/assets/auto-40.png differ diff --git a/assets/auto_build-1.png b/assets/auto_build-1.png new file mode 100644 index 000000000..d39894edf Binary files /dev/null and b/assets/auto_build-1.png differ diff --git a/assets/auto_build-10.png b/assets/auto_build-10.png new file mode 100644 index 000000000..d39894edf Binary files /dev/null and b/assets/auto_build-10.png differ diff --git a/assets/auto_build-2.1.png b/assets/auto_build-2.1.png new file mode 100644 index 000000000..fccbf6844 Binary files /dev/null and b/assets/auto_build-2.1.png differ diff --git a/assets/auto_build-2.png 
b/assets/auto_build-2.png new file mode 100644 index 000000000..3abbc828c Binary files /dev/null and b/assets/auto_build-2.png differ diff --git a/assets/auto_build-20.1.png b/assets/auto_build-20.1.png new file mode 100644 index 000000000..fccbf6844 Binary files /dev/null and b/assets/auto_build-20.1.png differ diff --git a/assets/auto_build-20.png b/assets/auto_build-20.png new file mode 100644 index 000000000..3abbc828c Binary files /dev/null and b/assets/auto_build-20.png differ diff --git a/assets/auto_build-3.png b/assets/auto_build-3.png new file mode 100644 index 000000000..c07414273 Binary files /dev/null and b/assets/auto_build-3.png differ diff --git a/assets/auto_build-30.png b/assets/auto_build-30.png new file mode 100644 index 000000000..c07414273 Binary files /dev/null and b/assets/auto_build-30.png differ diff --git a/assets/auto_build-4.png b/assets/auto_build-4.png new file mode 100644 index 000000000..8c9d53eef Binary files /dev/null and b/assets/auto_build-4.png differ diff --git a/assets/auto_build-40.png b/assets/auto_build-40.png new file mode 100644 index 000000000..8c9d53eef Binary files /dev/null and b/assets/auto_build-40.png differ diff --git a/assets/auto_build-5.png b/assets/auto_build-5.png new file mode 100644 index 000000000..71aa68c69 Binary files /dev/null and b/assets/auto_build-5.png differ diff --git a/assets/auto_build-50.png b/assets/auto_build-50.png new file mode 100644 index 000000000..71aa68c69 Binary files /dev/null and b/assets/auto_build-50.png differ diff --git a/assets/auto_build-6.png b/assets/auto_build-6.png new file mode 100644 index 000000000..2787d2ceb Binary files /dev/null and b/assets/auto_build-6.png differ diff --git a/assets/auto_build-60.png b/assets/auto_build-60.png new file mode 100644 index 000000000..2787d2ceb Binary files /dev/null and b/assets/auto_build-60.png differ diff --git a/assets/auto_build-7.png b/assets/auto_build-7.png new file mode 100644 index 000000000..2f8882681 Binary files 
/dev/null and b/assets/auto_build-7.png differ diff --git a/assets/auto_build-70.png b/assets/auto_build-70.png new file mode 100644 index 000000000..2f8882681 Binary files /dev/null and b/assets/auto_build-70.png differ diff --git a/assets/bind_mount.png b/assets/bind_mount.png new file mode 100644 index 000000000..2d8d4091b Binary files /dev/null and b/assets/bind_mount.png differ diff --git a/assets/bind_mount0.png b/assets/bind_mount0.png new file mode 100644 index 000000000..2d8d4091b Binary files /dev/null and b/assets/bind_mount0.png differ diff --git a/assets/biocontainer-1.png b/assets/biocontainer-1.png new file mode 100644 index 000000000..b06247b0b Binary files /dev/null and b/assets/biocontainer-1.png differ diff --git a/assets/biocontainer-10.png b/assets/biocontainer-10.png new file mode 100644 index 000000000..b06247b0b Binary files /dev/null and b/assets/biocontainer-10.png differ diff --git a/assets/biocontainer-2.png b/assets/biocontainer-2.png new file mode 100644 index 000000000..1daf92967 Binary files /dev/null and b/assets/biocontainer-2.png differ diff --git a/assets/biocontainer-20.png b/assets/biocontainer-20.png new file mode 100644 index 000000000..1daf92967 Binary files /dev/null and b/assets/biocontainer-20.png differ diff --git a/assets/biocontainer-3.png b/assets/biocontainer-3.png new file mode 100644 index 000000000..2f1bca1bb Binary files /dev/null and b/assets/biocontainer-3.png differ diff --git a/assets/biocontainer-30.png b/assets/biocontainer-30.png new file mode 100644 index 000000000..2f1bca1bb Binary files /dev/null and b/assets/biocontainer-30.png differ diff --git a/assets/biocontainer-4.png b/assets/biocontainer-4.png new file mode 100644 index 000000000..111d84177 Binary files /dev/null and b/assets/biocontainer-4.png differ diff --git a/assets/biocontainer-40.png b/assets/biocontainer-40.png new file mode 100644 index 000000000..111d84177 Binary files /dev/null and b/assets/biocontainer-40.png differ diff --git 
a/assets/biocontainer-5.png b/assets/biocontainer-5.png new file mode 100644 index 000000000..7bf238e60 Binary files /dev/null and b/assets/biocontainer-5.png differ diff --git a/assets/biocontainer-50.png b/assets/biocontainer-50.png new file mode 100644 index 000000000..7bf238e60 Binary files /dev/null and b/assets/biocontainer-50.png differ diff --git a/assets/biocontainer15.png b/assets/biocontainer15.png new file mode 100644 index 000000000..ff0f92adf Binary files /dev/null and b/assets/biocontainer15.png differ diff --git a/assets/biocontainer150.png b/assets/biocontainer150.png new file mode 100644 index 000000000..ff0f92adf Binary files /dev/null and b/assets/biocontainer150.png differ diff --git a/assets/biocontainers1.png b/assets/biocontainers1.png new file mode 100644 index 000000000..139729073 Binary files /dev/null and b/assets/biocontainers1.png differ diff --git a/assets/biocontainers10.png b/assets/biocontainers10.png new file mode 100644 index 000000000..139729073 Binary files /dev/null and b/assets/biocontainers10.png differ diff --git a/assets/biocontainers11.png b/assets/biocontainers11.png new file mode 100644 index 000000000..459cdbc07 Binary files /dev/null and b/assets/biocontainers11.png differ diff --git a/assets/biocontainers110.png b/assets/biocontainers110.png new file mode 100644 index 000000000..459cdbc07 Binary files /dev/null and b/assets/biocontainers110.png differ diff --git a/assets/biocontainers12.png b/assets/biocontainers12.png new file mode 100644 index 000000000..9205ce7a5 Binary files /dev/null and b/assets/biocontainers12.png differ diff --git a/assets/biocontainers120.png b/assets/biocontainers120.png new file mode 100644 index 000000000..9205ce7a5 Binary files /dev/null and b/assets/biocontainers120.png differ diff --git a/assets/biocontainers13.png b/assets/biocontainers13.png new file mode 100644 index 000000000..699513e25 Binary files /dev/null and b/assets/biocontainers13.png differ diff --git 
a/assets/biocontainers130.png b/assets/biocontainers130.png new file mode 100644 index 000000000..699513e25 Binary files /dev/null and b/assets/biocontainers130.png differ diff --git a/assets/biocontainers15.png b/assets/biocontainers15.png new file mode 100644 index 000000000..ff0f92adf Binary files /dev/null and b/assets/biocontainers15.png differ diff --git a/assets/biocontainers150.png b/assets/biocontainers150.png new file mode 100644 index 000000000..ff0f92adf Binary files /dev/null and b/assets/biocontainers150.png differ diff --git a/assets/biocontainers16.png b/assets/biocontainers16.png new file mode 100644 index 000000000..d58230f00 Binary files /dev/null and b/assets/biocontainers16.png differ diff --git a/assets/biocontainers160.png b/assets/biocontainers160.png new file mode 100644 index 000000000..d58230f00 Binary files /dev/null and b/assets/biocontainers160.png differ diff --git a/assets/biocontainers18.png b/assets/biocontainers18.png new file mode 100644 index 000000000..48fd18589 Binary files /dev/null and b/assets/biocontainers18.png differ diff --git a/assets/biocontainers180.png b/assets/biocontainers180.png new file mode 100644 index 000000000..48fd18589 Binary files /dev/null and b/assets/biocontainers180.png differ diff --git a/assets/biocontainers19.png b/assets/biocontainers19.png new file mode 100644 index 000000000..deed89ff4 Binary files /dev/null and b/assets/biocontainers19.png differ diff --git a/assets/biocontainers190.png b/assets/biocontainers190.png new file mode 100644 index 000000000..deed89ff4 Binary files /dev/null and b/assets/biocontainers190.png differ diff --git a/assets/biocontainers2.png b/assets/biocontainers2.png new file mode 100644 index 000000000..d64cf4c19 Binary files /dev/null and b/assets/biocontainers2.png differ diff --git a/assets/biocontainers20.png b/assets/biocontainers20.png new file mode 100644 index 000000000..9afa2df07 Binary files /dev/null and b/assets/biocontainers20.png differ diff --git 
a/assets/biocontainers200.png b/assets/biocontainers200.png new file mode 100644 index 000000000..9afa2df07 Binary files /dev/null and b/assets/biocontainers200.png differ diff --git a/assets/biocontainers21.png b/assets/biocontainers21.png new file mode 100644 index 000000000..5ef304cff Binary files /dev/null and b/assets/biocontainers21.png differ diff --git a/assets/biocontainers210.png b/assets/biocontainers210.png new file mode 100644 index 000000000..5ef304cff Binary files /dev/null and b/assets/biocontainers210.png differ diff --git a/assets/biocontainers22.png b/assets/biocontainers22.png new file mode 100644 index 000000000..d64cf4c19 Binary files /dev/null and b/assets/biocontainers22.png differ diff --git a/assets/biocontainers3.png b/assets/biocontainers3.png new file mode 100644 index 000000000..c6f5ae2db Binary files /dev/null and b/assets/biocontainers3.png differ diff --git a/assets/biocontainers30.png b/assets/biocontainers30.png new file mode 100644 index 000000000..c6f5ae2db Binary files /dev/null and b/assets/biocontainers30.png differ diff --git a/assets/biocontainers5a.png b/assets/biocontainers5a.png new file mode 100644 index 000000000..bbcb0d3c6 Binary files /dev/null and b/assets/biocontainers5a.png differ diff --git a/assets/biocontainers5a0.png b/assets/biocontainers5a0.png new file mode 100644 index 000000000..bbcb0d3c6 Binary files /dev/null and b/assets/biocontainers5a0.png differ diff --git a/assets/biocontainers6.png b/assets/biocontainers6.png new file mode 100644 index 000000000..0332127f3 Binary files /dev/null and b/assets/biocontainers6.png differ diff --git a/assets/biocontainers60.png b/assets/biocontainers60.png new file mode 100644 index 000000000..0332127f3 Binary files /dev/null and b/assets/biocontainers60.png differ diff --git a/assets/biocontainers8.png b/assets/biocontainers8.png new file mode 100644 index 000000000..c5b6d1d96 Binary files /dev/null and b/assets/biocontainers8.png differ diff --git 
a/assets/biocontainers80.png b/assets/biocontainers80.png new file mode 100644 index 000000000..c5b6d1d96 Binary files /dev/null and b/assets/biocontainers80.png differ diff --git a/assets/biocontainers9.png b/assets/biocontainers9.png new file mode 100644 index 000000000..286589b8b Binary files /dev/null and b/assets/biocontainers9.png differ diff --git a/assets/biocontainers90.png b/assets/biocontainers90.png new file mode 100644 index 000000000..286589b8b Binary files /dev/null and b/assets/biocontainers90.png differ diff --git a/assets/bisque/bisque-icon.png b/assets/bisque/bisque-icon.png new file mode 100644 index 000000000..3c15e3d11 Binary files /dev/null and b/assets/bisque/bisque-icon.png differ diff --git a/assets/catpic-1.png b/assets/catpic-1.png new file mode 100644 index 000000000..941eaf8f3 Binary files /dev/null and b/assets/catpic-1.png differ diff --git a/assets/catpic-10.png b/assets/catpic-10.png new file mode 100644 index 000000000..941eaf8f3 Binary files /dev/null and b/assets/catpic-10.png differ diff --git a/assets/cc-main.png b/assets/cc-main.png new file mode 100644 index 000000000..46856609b Binary files /dev/null and b/assets/cc-main.png differ diff --git a/assets/cc-main0.png b/assets/cc-main0.png new file mode 100644 index 000000000..46856609b Binary files /dev/null and b/assets/cc-main0.png differ diff --git a/assets/cmd1.png b/assets/cmd1.png new file mode 100644 index 000000000..011674092 Binary files /dev/null and b/assets/cmd1.png differ diff --git a/assets/cmd10.png b/assets/cmd10.png new file mode 100644 index 000000000..011674092 Binary files /dev/null and b/assets/cmd10.png differ diff --git a/assets/cmd15.png b/assets/cmd15.png new file mode 100644 index 000000000..1646a9961 Binary files /dev/null and b/assets/cmd15.png differ diff --git a/assets/cmd150.png b/assets/cmd150.png new file mode 100644 index 000000000..1646a9961 Binary files /dev/null and b/assets/cmd150.png differ diff --git a/assets/cmd16.png b/assets/cmd16.png 
new file mode 100644 index 000000000..a478d3b12 Binary files /dev/null and b/assets/cmd16.png differ diff --git a/assets/cmd160.png b/assets/cmd160.png new file mode 100644 index 000000000..a478d3b12 Binary files /dev/null and b/assets/cmd160.png differ diff --git a/assets/cmd2.png b/assets/cmd2.png new file mode 100644 index 000000000..6800b124d Binary files /dev/null and b/assets/cmd2.png differ diff --git a/assets/cmd20.png b/assets/cmd20.png new file mode 100644 index 000000000..6800b124d Binary files /dev/null and b/assets/cmd20.png differ diff --git a/assets/cmd3.png b/assets/cmd3.png new file mode 100644 index 000000000..2884dd17e Binary files /dev/null and b/assets/cmd3.png differ diff --git a/assets/cmd30.png b/assets/cmd30.png new file mode 100644 index 000000000..2884dd17e Binary files /dev/null and b/assets/cmd30.png differ diff --git a/assets/containers1.png b/assets/containers1.png new file mode 100644 index 000000000..c1c0734a7 Binary files /dev/null and b/assets/containers1.png differ diff --git a/assets/create_repo.png b/assets/create_repo.png new file mode 100644 index 000000000..54a6e3f3d Binary files /dev/null and b/assets/create_repo.png differ diff --git a/assets/create_repo0.png b/assets/create_repo0.png new file mode 100644 index 000000000..54a6e3f3d Binary files /dev/null and b/assets/create_repo0.png differ diff --git a/assets/create_repo2.png b/assets/create_repo2.png new file mode 100644 index 000000000..7b8ad535c Binary files /dev/null and b/assets/create_repo2.png differ diff --git a/assets/create_repo20.png b/assets/create_repo20.png new file mode 100644 index 000000000..7b8ad535c Binary files /dev/null and b/assets/create_repo20.png differ diff --git a/assets/createapp_VICE_1.png b/assets/createapp_VICE_1.png new file mode 100644 index 000000000..8b9e7ab26 Binary files /dev/null and b/assets/createapp_VICE_1.png differ diff --git a/assets/createnewrepo.png b/assets/createnewrepo.png new file mode 100644 index 000000000..81f76cc91 
Binary files /dev/null and b/assets/createnewrepo.png differ diff --git a/assets/cyverse_cmyk.png b/assets/cyverse_cmyk.png new file mode 100644 index 000000000..0e5845473 Binary files /dev/null and b/assets/cyverse_cmyk.png differ diff --git a/assets/cyverse_cmyk0.png b/assets/cyverse_cmyk0.png new file mode 100644 index 000000000..0e5845473 Binary files /dev/null and b/assets/cyverse_cmyk0.png differ diff --git a/assets/cyverse_globe_cmyk.png b/assets/cyverse_globe_cmyk.png new file mode 100644 index 000000000..33b2fa200 Binary files /dev/null and b/assets/cyverse_globe_cmyk.png differ diff --git a/assets/cyverse_globe_cmyk0.png b/assets/cyverse_globe_cmyk0.png new file mode 100644 index 000000000..33b2fa200 Binary files /dev/null and b/assets/cyverse_globe_cmyk0.png differ diff --git a/assets/cyverse_icon_tm_space.png b/assets/cyverse_icon_tm_space.png new file mode 100644 index 000000000..a5cb4244f Binary files /dev/null and b/assets/cyverse_icon_tm_space.png differ diff --git a/assets/cyverse_icon_tm_space0.png b/assets/cyverse_icon_tm_space0.png new file mode 100644 index 000000000..a5cb4244f Binary files /dev/null and b/assets/cyverse_icon_tm_space0.png differ diff --git a/assets/cyverse_icon_tm_space1.png b/assets/cyverse_icon_tm_space1.png new file mode 100644 index 000000000..a5cb4244f Binary files /dev/null and b/assets/cyverse_icon_tm_space1.png differ diff --git a/assets/cyverse_learning.png b/assets/cyverse_learning.png new file mode 100644 index 000000000..d1d5de1f6 Binary files /dev/null and b/assets/cyverse_learning.png differ diff --git a/assets/cyverse_platform_stack.png b/assets/cyverse_platform_stack.png new file mode 100644 index 000000000..095e884b7 Binary files /dev/null and b/assets/cyverse_platform_stack.png differ diff --git a/assets/cyverse_platform_stack0.png b/assets/cyverse_platform_stack0.png new file mode 100644 index 000000000..095e884b7 Binary files /dev/null and b/assets/cyverse_platform_stack0.png differ diff --git 
a/assets/cyverse_rgb.png b/assets/cyverse_rgb.png new file mode 100644 index 000000000..90378af57 Binary files /dev/null and b/assets/cyverse_rgb.png differ diff --git a/assets/cyverse_rgb0.png b/assets/cyverse_rgb0.png new file mode 100644 index 000000000..90378af57 Binary files /dev/null and b/assets/cyverse_rgb0.png differ diff --git a/assets/cyverse_rgb1.png b/assets/cyverse_rgb1.png new file mode 100644 index 000000000..90378af57 Binary files /dev/null and b/assets/cyverse_rgb1.png differ diff --git a/assets/data_life_cycle.png b/assets/data_life_cycle.png new file mode 100644 index 000000000..c24e64b9a Binary files /dev/null and b/assets/data_life_cycle.png differ diff --git a/assets/data_life_cycle0.png b/assets/data_life_cycle0.png new file mode 100644 index 000000000..c24e64b9a Binary files /dev/null and b/assets/data_life_cycle0.png differ diff --git a/assets/data_store/datastore-icon.png b/assets/data_store/datastore-icon.png new file mode 100644 index 000000000..dc55b3e3d Binary files /dev/null and b/assets/data_store/datastore-icon.png differ diff --git a/assets/data_store/datastore_plchldr.png b/assets/data_store/datastore_plchldr.png new file mode 100644 index 000000000..886defb3e Binary files /dev/null and b/assets/data_store/datastore_plchldr.png differ diff --git a/assets/dc-1.png b/assets/dc-1.png new file mode 100644 index 000000000..003f402ad Binary files /dev/null and b/assets/dc-1.png differ diff --git a/assets/dc-10.png b/assets/dc-10.png new file mode 100644 index 000000000..003f402ad Binary files /dev/null and b/assets/dc-10.png differ diff --git a/assets/de/analyses_icon.png b/assets/de/analyses_icon.png new file mode 100644 index 000000000..a79efc402 Binary files /dev/null and b/assets/de/analyses_icon.png differ diff --git a/assets/de/apps_icon.png b/assets/de/apps_icon.png new file mode 100644 index 000000000..7f12f4eac Binary files /dev/null and b/assets/de/apps_icon.png differ diff --git a/assets/de/beta.png b/assets/de/beta.png new 
file mode 100644 index 000000000..8c988ab68 Binary files /dev/null and b/assets/de/beta.png differ diff --git a/assets/de/comment.png b/assets/de/comment.png new file mode 100644 index 000000000..cb6be4ce4 Binary files /dev/null and b/assets/de/comment.png differ diff --git a/assets/de/data_icon.png b/assets/de/data_icon.png new file mode 100644 index 000000000..ff73ec4b7 Binary files /dev/null and b/assets/de/data_icon.png differ diff --git a/assets/de/data_window.png b/assets/de/data_window.png new file mode 100644 index 000000000..c833cebde Binary files /dev/null and b/assets/de/data_window.png differ diff --git a/assets/de/de-icon.png b/assets/de/de-icon.png new file mode 100644 index 000000000..b009be1e7 Binary files /dev/null and b/assets/de/de-icon.png differ diff --git a/assets/de/de_app_icon.png b/assets/de/de_app_icon.png new file mode 100644 index 000000000..40cbe528e Binary files /dev/null and b/assets/de/de_app_icon.png differ diff --git a/assets/de/delete_icon.png b/assets/de/delete_icon.png new file mode 100644 index 000000000..86dd8b0c6 Binary files /dev/null and b/assets/de/delete_icon.png differ diff --git a/assets/de/favorite.png b/assets/de/favorite.png new file mode 100644 index 000000000..c0da08bdd Binary files /dev/null and b/assets/de/favorite.png differ diff --git a/assets/de/info.png b/assets/de/info.png new file mode 100644 index 000000000..cc9481416 Binary files /dev/null and b/assets/de/info.png differ diff --git a/assets/de/link_icon.png b/assets/de/link_icon.png new file mode 100644 index 000000000..60973cb39 Binary files /dev/null and b/assets/de/link_icon.png differ diff --git a/assets/de/manage_data_links.png b/assets/de/manage_data_links.png new file mode 100644 index 000000000..8e318d122 Binary files /dev/null and b/assets/de/manage_data_links.png differ diff --git a/assets/de/manage_sharing_menu.png b/assets/de/manage_sharing_menu.png new file mode 100644 index 000000000..8d0c90d1a Binary files /dev/null and 
b/assets/de/manage_sharing_menu.png differ diff --git a/assets/de/muscle_app_window.png b/assets/de/muscle_app_window.png new file mode 100644 index 000000000..5d4f63607 Binary files /dev/null and b/assets/de/muscle_app_window.png differ diff --git a/assets/de/person_icon.png b/assets/de/person_icon.png new file mode 100644 index 000000000..b5d5c42d9 Binary files /dev/null and b/assets/de/person_icon.png differ diff --git a/assets/de/private.png b/assets/de/private.png new file mode 100644 index 000000000..41213cddf Binary files /dev/null and b/assets/de/private.png differ diff --git a/assets/de/rating.png b/assets/de/rating.png new file mode 100644 index 000000000..0fd4c26fc Binary files /dev/null and b/assets/de/rating.png differ diff --git a/assets/de/unavailable.png b/assets/de/unavailable.png new file mode 100644 index 000000000..b5f390a1f Binary files /dev/null and b/assets/de/unavailable.png differ diff --git a/assets/de/viewing_window.png b/assets/de/viewing_window.png new file mode 100644 index 000000000..75b791827 Binary files /dev/null and b/assets/de/viewing_window.png differ diff --git a/assets/de2_datastore1.png b/assets/de2_datastore1.png new file mode 100644 index 000000000..d99c0afb7 Binary files /dev/null and b/assets/de2_datastore1.png differ diff --git a/assets/de2_datastore2.png b/assets/de2_datastore2.png new file mode 100644 index 000000000..ae01390de Binary files /dev/null and b/assets/de2_datastore2.png differ diff --git a/assets/de2_datastore3.png b/assets/de2_datastore3.png new file mode 100644 index 000000000..669c4e6d5 Binary files /dev/null and b/assets/de2_datastore3.png differ diff --git a/assets/de2_datastore4.png b/assets/de2_datastore4.png new file mode 100644 index 000000000..aba2cef99 Binary files /dev/null and b/assets/de2_datastore4.png differ diff --git a/assets/de2_select_input.png b/assets/de2_select_input.png new file mode 100644 index 000000000..8851fb256 Binary files /dev/null and b/assets/de2_select_input.png differ 
diff --git a/assets/dna_subway/dnasubway-icon.png b/assets/dna_subway/dnasubway-icon.png new file mode 100644 index 000000000..0c3af3668 Binary files /dev/null and b/assets/dna_subway/dnasubway-icon.png differ diff --git a/assets/docker.png b/assets/docker.png new file mode 100644 index 000000000..d1dad458b Binary files /dev/null and b/assets/docker.png differ diff --git a/assets/docker0.png b/assets/docker0.png new file mode 100644 index 000000000..d1dad458b Binary files /dev/null and b/assets/docker0.png differ diff --git a/assets/docker_image.png b/assets/docker_image.png new file mode 100644 index 000000000..f00d38d8d Binary files /dev/null and b/assets/docker_image.png differ diff --git a/assets/docker_image0.png b/assets/docker_image0.png new file mode 100644 index 000000000..f00d38d8d Binary files /dev/null and b/assets/docker_image0.png differ diff --git a/assets/dockerhub_autobuild.png b/assets/dockerhub_autobuild.png new file mode 100644 index 000000000..d726d4e5b Binary files /dev/null and b/assets/dockerhub_autobuild.png differ diff --git a/assets/dockerhub_autobuild0.png b/assets/dockerhub_autobuild0.png new file mode 100644 index 000000000..d726d4e5b Binary files /dev/null and b/assets/dockerhub_autobuild0.png differ diff --git a/assets/dockerhub_autobuilds.png b/assets/dockerhub_autobuilds.png new file mode 100644 index 000000000..28fff8ace Binary files /dev/null and b/assets/dockerhub_autobuilds.png differ diff --git a/assets/dockerhub_autobuilds0.png b/assets/dockerhub_autobuilds0.png new file mode 100644 index 000000000..28fff8ace Binary files /dev/null and b/assets/dockerhub_autobuilds0.png differ diff --git a/assets/dockerhub_buildsettings.png b/assets/dockerhub_buildsettings.png new file mode 100644 index 000000000..aa2950e59 Binary files /dev/null and b/assets/dockerhub_buildsettings.png differ diff --git a/assets/dockerhub_buildsettings0.png b/assets/dockerhub_buildsettings0.png new file mode 100644 index 000000000..aa2950e59 Binary files 
/dev/null and b/assets/dockerhub_buildsettings0.png differ diff --git a/assets/dockerhub_create.png b/assets/dockerhub_create.png new file mode 100644 index 000000000..b998ab277 Binary files /dev/null and b/assets/dockerhub_create.png differ diff --git a/assets/dockerhub_create0.png b/assets/dockerhub_create0.png new file mode 100644 index 000000000..b998ab277 Binary files /dev/null and b/assets/dockerhub_create0.png differ diff --git a/assets/dockerhub_createrepo.png b/assets/dockerhub_createrepo.png new file mode 100644 index 000000000..877d39804 Binary files /dev/null and b/assets/dockerhub_createrepo.png differ diff --git a/assets/dockerhub_createrepo0.png b/assets/dockerhub_createrepo0.png new file mode 100644 index 000000000..877d39804 Binary files /dev/null and b/assets/dockerhub_createrepo0.png differ diff --git a/assets/evolutionofcyverse.png b/assets/evolutionofcyverse.png new file mode 100644 index 000000000..7451c9d8a Binary files /dev/null and b/assets/evolutionofcyverse.png differ diff --git a/assets/evolutionofcyverse0.png b/assets/evolutionofcyverse0.png new file mode 100644 index 000000000..7451c9d8a Binary files /dev/null and b/assets/evolutionofcyverse0.png differ diff --git a/assets/f1000.png b/assets/f1000.png new file mode 100644 index 000000000..15c5a33b5 Binary files /dev/null and b/assets/f1000.png differ diff --git a/assets/f10000.png b/assets/f10000.png new file mode 100644 index 000000000..15c5a33b5 Binary files /dev/null and b/assets/f10000.png differ diff --git a/assets/fastqe.png b/assets/fastqe.png new file mode 100644 index 000000000..6656437ed Binary files /dev/null and b/assets/fastqe.png differ diff --git a/assets/fastqe0.png b/assets/fastqe0.png new file mode 100644 index 000000000..6656437ed Binary files /dev/null and b/assets/fastqe0.png differ diff --git a/assets/five_schools.png b/assets/five_schools.png new file mode 100644 index 000000000..285f75af4 Binary files /dev/null and b/assets/five_schools.png differ diff --git 
a/assets/foss-main.png b/assets/foss-main.png new file mode 100644 index 000000000..3ce696219 Binary files /dev/null and b/assets/foss-main.png differ diff --git a/assets/foss-main0.png b/assets/foss-main0.png new file mode 100644 index 000000000..13a42a2cb Binary files /dev/null and b/assets/foss-main0.png differ diff --git a/assets/foss_folder_contents_de2.png b/assets/foss_folder_contents_de2.png new file mode 100644 index 000000000..c88162c1c Binary files /dev/null and b/assets/foss_folder_contents_de2.png differ diff --git a/assets/foss_folder_de2.png b/assets/foss_folder_de2.png new file mode 100644 index 000000000..c9cf5b401 Binary files /dev/null and b/assets/foss_folder_de2.png differ diff --git a/assets/foss_title.png b/assets/foss_title.png new file mode 100644 index 000000000..16819c7c3 Binary files /dev/null and b/assets/foss_title.png differ diff --git a/assets/git-logo.png b/assets/git-logo.png new file mode 100644 index 000000000..cf5cdcb01 Binary files /dev/null and b/assets/git-logo.png differ diff --git a/assets/git_1.png b/assets/git_1.png new file mode 100644 index 000000000..e5060458e Binary files /dev/null and b/assets/git_1.png differ diff --git a/assets/git_10.png b/assets/git_10.png new file mode 100644 index 000000000..ed8f905d7 Binary files /dev/null and b/assets/git_10.png differ diff --git a/assets/git_11.png b/assets/git_11.png new file mode 100644 index 000000000..76a10ce82 Binary files /dev/null and b/assets/git_11.png differ diff --git a/assets/git_12.png b/assets/git_12.png new file mode 100644 index 000000000..ac7543e7b Binary files /dev/null and b/assets/git_12.png differ diff --git a/assets/git_13.png b/assets/git_13.png new file mode 100644 index 000000000..a6dcc6629 Binary files /dev/null and b/assets/git_13.png differ diff --git a/assets/git_14.png b/assets/git_14.png new file mode 100644 index 000000000..ba600054b Binary files /dev/null and b/assets/git_14.png differ diff --git a/assets/git_15.png b/assets/git_15.png new 
file mode 100644 index 000000000..8444cd048 Binary files /dev/null and b/assets/git_15.png differ diff --git a/assets/git_16.png b/assets/git_16.png new file mode 100644 index 000000000..1ba4e9c66 Binary files /dev/null and b/assets/git_16.png differ diff --git a/assets/git_17.png b/assets/git_17.png new file mode 100644 index 000000000..67b95cbba Binary files /dev/null and b/assets/git_17.png differ diff --git a/assets/git_18.png b/assets/git_18.png new file mode 100644 index 000000000..cfb6e7cb9 Binary files /dev/null and b/assets/git_18.png differ diff --git a/assets/git_19.png b/assets/git_19.png new file mode 100644 index 000000000..0334a29ad Binary files /dev/null and b/assets/git_19.png differ diff --git a/assets/git_2.png b/assets/git_2.png new file mode 100644 index 000000000..a4b4914c0 Binary files /dev/null and b/assets/git_2.png differ diff --git a/assets/git_20.png b/assets/git_20.png new file mode 100644 index 000000000..98d722459 Binary files /dev/null and b/assets/git_20.png differ diff --git a/assets/git_21.png b/assets/git_21.png new file mode 100644 index 000000000..e52fc27e8 Binary files /dev/null and b/assets/git_21.png differ diff --git a/assets/git_3.png b/assets/git_3.png new file mode 100644 index 000000000..82622e57c Binary files /dev/null and b/assets/git_3.png differ diff --git a/assets/git_4.png b/assets/git_4.png new file mode 100644 index 000000000..2ceec6cfd Binary files /dev/null and b/assets/git_4.png differ diff --git a/assets/git_5.png b/assets/git_5.png new file mode 100644 index 000000000..356d7a23a Binary files /dev/null and b/assets/git_5.png differ diff --git a/assets/git_6.png b/assets/git_6.png new file mode 100644 index 000000000..ca9cee5e2 Binary files /dev/null and b/assets/git_6.png differ diff --git a/assets/git_7.png b/assets/git_7.png new file mode 100644 index 000000000..9489dd5f1 Binary files /dev/null and b/assets/git_7.png differ diff --git a/assets/git_8.png b/assets/git_8.png new file mode 100644 index 
000000000..8f4242fa3 Binary files /dev/null and b/assets/git_8.png differ diff --git a/assets/git_9.png b/assets/git_9.png new file mode 100644 index 000000000..02183eee6 Binary files /dev/null and b/assets/git_9.png differ diff --git a/assets/gitrepo.png b/assets/gitrepo.png new file mode 100644 index 000000000..cfd5f24fb Binary files /dev/null and b/assets/gitrepo.png differ diff --git a/assets/homeicon.png b/assets/homeicon.png new file mode 100644 index 000000000..f06bed059 Binary files /dev/null and b/assets/homeicon.png differ diff --git a/assets/homeicon0.png b/assets/homeicon0.png new file mode 100644 index 000000000..f06bed059 Binary files /dev/null and b/assets/homeicon0.png differ diff --git a/assets/homeicon1.png b/assets/homeicon1.png new file mode 100644 index 000000000..f06bed059 Binary files /dev/null and b/assets/homeicon1.png differ diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 000000000..1cf13b9f9 Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/img_building_1.png b/assets/img_building_1.png new file mode 100644 index 000000000..c3e77ac24 Binary files /dev/null and b/assets/img_building_1.png differ diff --git a/assets/img_building_10.png b/assets/img_building_10.png new file mode 100644 index 000000000..c3e77ac24 Binary files /dev/null and b/assets/img_building_10.png differ diff --git a/assets/img_building_3.png b/assets/img_building_3.png new file mode 100644 index 000000000..7aefe8b18 Binary files /dev/null and b/assets/img_building_3.png differ diff --git a/assets/img_building_30.png b/assets/img_building_30.png new file mode 100644 index 000000000..7aefe8b18 Binary files /dev/null and b/assets/img_building_30.png differ diff --git a/assets/img_building_4.png b/assets/img_building_4.png new file mode 100644 index 000000000..3907fbe9b Binary files /dev/null and b/assets/img_building_4.png differ diff --git a/assets/img_building_40.png b/assets/img_building_40.png 
new file mode 100644 index 000000000..3907fbe9b Binary files /dev/null and b/assets/img_building_40.png differ diff --git a/assets/img_building_5.png b/assets/img_building_5.png new file mode 100644 index 000000000..3233b95ec Binary files /dev/null and b/assets/img_building_5.png differ diff --git a/assets/img_building_50.png b/assets/img_building_50.png new file mode 100644 index 000000000..3233b95ec Binary files /dev/null and b/assets/img_building_50.png differ diff --git a/assets/img_building_6.png b/assets/img_building_6.png new file mode 100644 index 000000000..38477c0d1 Binary files /dev/null and b/assets/img_building_6.png differ diff --git a/assets/img_building_60.png b/assets/img_building_60.png new file mode 100644 index 000000000..38477c0d1 Binary files /dev/null and b/assets/img_building_60.png differ diff --git a/assets/img_building_7.png b/assets/img_building_7.png new file mode 100644 index 000000000..1b59f438e Binary files /dev/null and b/assets/img_building_7.png differ diff --git a/assets/img_building_70.png b/assets/img_building_70.png new file mode 100644 index 000000000..1b59f438e Binary files /dev/null and b/assets/img_building_70.png differ diff --git a/assets/img_building_8.png b/assets/img_building_8.png new file mode 100644 index 000000000..8d02ed2c6 Binary files /dev/null and b/assets/img_building_8.png differ diff --git a/assets/img_building_80.png b/assets/img_building_80.png new file mode 100644 index 000000000..8d02ed2c6 Binary files /dev/null and b/assets/img_building_80.png differ diff --git a/assets/img_building_9.png b/assets/img_building_9.png new file mode 100644 index 000000000..6f1117b1b Binary files /dev/null and b/assets/img_building_9.png differ diff --git a/assets/img_building_90.png b/assets/img_building_90.png new file mode 100644 index 000000000..6f1117b1b Binary files /dev/null and b/assets/img_building_90.png differ diff --git a/assets/instructors_code.png b/assets/instructors_code.png new file mode 100644 index 
000000000..0a3fdc8e1 Binary files /dev/null and b/assets/instructors_code.png differ diff --git a/assets/instructors_code0.png b/assets/instructors_code0.png new file mode 100644 index 000000000..0a3fdc8e1 Binary files /dev/null and b/assets/instructors_code0.png differ diff --git a/assets/intercom.png b/assets/intercom.png new file mode 100644 index 000000000..6c578f63f Binary files /dev/null and b/assets/intercom.png differ diff --git a/assets/intercomlogo.png b/assets/intercomlogo.png new file mode 100644 index 000000000..5b4247cac Binary files /dev/null and b/assets/intercomlogo.png differ diff --git a/assets/intercomlogo0.png b/assets/intercomlogo0.png new file mode 100644 index 000000000..5b4247cac Binary files /dev/null and b/assets/intercomlogo0.png differ diff --git a/assets/javascripts/bundle.aecac24b.min.js b/assets/javascripts/bundle.aecac24b.min.js new file mode 100644 index 000000000..464603d80 --- /dev/null +++ b/assets/javascripts/bundle.aecac24b.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var wi=Object.create;var ur=Object.defineProperty;var Si=Object.getOwnPropertyDescriptor;var Ti=Object.getOwnPropertyNames,kt=Object.getOwnPropertySymbols,Oi=Object.getPrototypeOf,dr=Object.prototype.hasOwnProperty,Zr=Object.prototype.propertyIsEnumerable;var Xr=(e,t,r)=>t in e?ur(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))dr.call(t,r)&&Xr(e,r,t[r]);if(kt)for(var r of kt(t))Zr.call(t,r)&&Xr(e,r,t[r]);return e};var eo=(e,t)=>{var r={};for(var o in e)dr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&kt)for(var o of kt(e))t.indexOf(o)<0&&Zr.call(e,o)&&(r[o]=e[o]);return r};var hr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Mi=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Ti(t))!dr.call(e,n)&&n!==r&&ur(e,n,{get:()=>t[n],enumerable:!(o=Si(t,n))||o.enumerable});return e};var 
Ht=(e,t,r)=>(r=e!=null?wi(Oi(e)):{},Mi(t||!e||!e.__esModule?ur(r,"default",{value:e,enumerable:!0}):r,e));var ro=hr((br,to)=>{(function(e,t){typeof br=="object"&&typeof to!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(br,function(){"use strict";function e(r){var o=!0,n=!1,i=null,s={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function a(C){return!!(C&&C!==document&&C.nodeName!=="HTML"&&C.nodeName!=="BODY"&&"classList"in C&&"contains"in C.classList)}function c(C){var it=C.type,Ne=C.tagName;return!!(Ne==="INPUT"&&s[it]&&!C.readOnly||Ne==="TEXTAREA"&&!C.readOnly||C.isContentEditable)}function p(C){C.classList.contains("focus-visible")||(C.classList.add("focus-visible"),C.setAttribute("data-focus-visible-added",""))}function l(C){C.hasAttribute("data-focus-visible-added")&&(C.classList.remove("focus-visible"),C.removeAttribute("data-focus-visible-added"))}function f(C){C.metaKey||C.altKey||C.ctrlKey||(a(r.activeElement)&&p(r.activeElement),o=!0)}function u(C){o=!1}function d(C){a(C.target)&&(o||c(C.target))&&p(C.target)}function v(C){a(C.target)&&(C.target.classList.contains("focus-visible")||C.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(C.target))}function b(C){document.visibilityState==="hidden"&&(n&&(o=!0),z())}function z(){document.addEventListener("mousemove",G),document.addEventListener("mousedown",G),document.addEventListener("mouseup",G),document.addEventListener("pointermove",G),document.addEventListener("pointerdown",G),document.addEventListener("pointerup",G),document.addEventListener("touchmove",G),document.addEventListener("touchstart",G),document.addEventListener("touchend",G)}function 
K(){document.removeEventListener("mousemove",G),document.removeEventListener("mousedown",G),document.removeEventListener("mouseup",G),document.removeEventListener("pointermove",G),document.removeEventListener("pointerdown",G),document.removeEventListener("pointerup",G),document.removeEventListener("touchmove",G),document.removeEventListener("touchstart",G),document.removeEventListener("touchend",G)}function G(C){C.target.nodeName&&C.target.nodeName.toLowerCase()==="html"||(o=!1,K())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",b,!0),z(),r.addEventListener("focus",d,!0),r.addEventListener("blur",v,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Vr=hr((Ot,Dr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Ot=="object"&&typeof Dr=="object"?Dr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Ot=="object"?Ot.ClipboardJS=r():t.ClipboardJS=r()})(Ot,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ei}});var s=i(279),a=i.n(s),c=i(370),p=i.n(c),l=i(817),f=i.n(l);function u(U){try{return document.execCommand(U)}catch(O){return!1}}var d=function(O){var S=f()(O);return u("cut"),S},v=d;function b(U){var O=document.documentElement.getAttribute("dir")==="rtl",S=document.createElement("textarea");S.style.fontSize="12pt",S.style.border="0",S.style.padding="0",S.style.margin="0",S.style.position="absolute",S.style[O?"right":"left"]="-9999px";var $=window.pageYOffset||document.documentElement.scrollTop;return S.style.top="".concat($,"px"),S.setAttribute("readonly",""),S.value=U,S}var z=function(O,S){var $=b(O);S.container.appendChild($);var F=f()($);return u("copy"),$.remove(),F},K=function(O){var S=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},$="";return typeof O=="string"?$=z(O,S):O instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(O==null?void 0:O.type)?$=z(O.value,S):($=f()(O),u("copy")),$},G=K;function C(U){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?C=function(S){return typeof S}:C=function(S){return S&&typeof Symbol=="function"&&S.constructor===Symbol&&S!==Symbol.prototype?"symbol":typeof S},C(U)}var it=function(){var O=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},S=O.action,$=S===void 0?"copy":S,F=O.container,Q=O.target,_e=O.text;if($!=="copy"&&$!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Q!==void 0)if(Q&&C(Q)==="object"&&Q.nodeType===1){if($==="copy"&&Q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if($==="cut"&&(Q.hasAttribute("readonly")||Q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(_e)return G(_e,{container:F});if(Q)return $==="cut"?v(Q):G(Q,{container:F})},Ne=it;function Pe(U){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Pe=function(S){return typeof S}:Pe=function(S){return S&&typeof Symbol=="function"&&S.constructor===Symbol&&S!==Symbol.prototype?"symbol":typeof S},Pe(U)}function ui(U,O){if(!(U instanceof O))throw new TypeError("Cannot call a class as a function")}function Jr(U,O){for(var S=0;S0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof F.action=="function"?F.action:this.defaultAction,this.target=typeof F.target=="function"?F.target:this.defaultTarget,this.text=typeof F.text=="function"?F.text:this.defaultText,this.container=Pe(F.container)==="object"?F.container:document.body}},{key:"listenClick",value:function(F){var Q=this;this.listener=p()(F,"click",function(_e){return Q.onClick(_e)})}},{key:"onClick",value:function(F){var Q=F.delegateTarget||F.currentTarget,_e=this.action(Q)||"copy",Ct=Ne({action:_e,container:this.container,target:this.target(Q),text:this.text(Q)});this.emit(Ct?"success":"error",{action:_e,text:Ct,trigger:Q,clearSelection:function(){Q&&Q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(F){return fr("action",F)}},{key:"defaultTarget",value:function(F){var Q=fr("target",F);if(Q)return document.querySelector(Q)}},{key:"defaultText",value:function(F){return fr("text",F)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(F){var Q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return G(F,Q)}},{key:"cut",value:function(F){return 
v(F)}},{key:"isSupported",value:function(){var F=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Q=typeof F=="string"?[F]:F,_e=!!document.queryCommandSupported;return Q.forEach(function(Ct){_e=_e&&!!document.queryCommandSupported(Ct)}),_e}}]),S}(a()),Ei=yi},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function s(a,c){for(;a&&a.nodeType!==n;){if(typeof a.matches=="function"&&a.matches(c))return a;a=a.parentNode}}o.exports=s},438:function(o,n,i){var s=i(828);function a(l,f,u,d,v){var b=p.apply(this,arguments);return l.addEventListener(u,b,v),{destroy:function(){l.removeEventListener(u,b,v)}}}function c(l,f,u,d,v){return typeof l.addEventListener=="function"?a.apply(null,arguments):typeof u=="function"?a.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(b){return a(b,f,u,d,v)}))}function p(l,f,u,d){return function(v){v.delegateTarget=s(v.target,f),v.delegateTarget&&d.call(l,v)}}o.exports=c},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var s=Object.prototype.toString.call(i);return i!==void 0&&(s==="[object NodeList]"||s==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var s=Object.prototype.toString.call(i);return s==="[object Function]"}},370:function(o,n,i){var s=i(879),a=i(438);function c(u,d,v){if(!u&&!d&&!v)throw new Error("Missing required arguments");if(!s.string(d))throw new TypeError("Second argument must be a String");if(!s.fn(v))throw new TypeError("Third argument must be a Function");if(s.node(u))return p(u,d,v);if(s.nodeList(u))return l(u,d,v);if(s.string(u))return f(u,d,v);throw new 
TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function p(u,d,v){return u.addEventListener(d,v),{destroy:function(){u.removeEventListener(d,v)}}}function l(u,d,v){return Array.prototype.forEach.call(u,function(b){b.addEventListener(d,v)}),{destroy:function(){Array.prototype.forEach.call(u,function(b){b.removeEventListener(d,v)})}}}function f(u,d,v){return a(document.body,u,d,v)}o.exports=c},817:function(o){function n(i){var s;if(i.nodeName==="SELECT")i.focus(),s=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var a=i.hasAttribute("readonly");a||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),a||i.removeAttribute("readonly"),s=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var c=window.getSelection(),p=document.createRange();p.selectNodeContents(i),c.removeAllRanges(),c.addRange(p),s=c.toString()}return s}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,s,a){var c=this.e||(this.e={});return(c[i]||(c[i]=[])).push({fn:s,ctx:a}),this},once:function(i,s,a){var c=this;function p(){c.off(i,p),s.apply(a,arguments)}return p._=s,this.on(i,p,a)},emit:function(i){var s=[].slice.call(arguments,1),a=((this.e||(this.e={}))[i]||[]).slice(),c=0,p=a.length;for(c;c{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var Ha=/["'&<>]/;Un.exports=$a;function $a(e){var t=""+e,r=Ha.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i0&&i[i.length-1])&&(p[0]===6||p[0]===2)){r=0;continue}if(p[0]===3&&(!i||p[1]>i[0]&&p[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],s;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(a){s={error:a}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(s)throw s.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||a(u,d)})})}function a(u,d){try{c(o[u](d))}catch(v){f(i[0][3],v)}}function c(u){u.value instanceof Ze?Promise.resolve(u.value.v).then(p,l):f(i[0][2],u)}function p(u){a("next",u)}function l(u){a("throw",u)}function f(u,d){u(d),i.shift(),i.length&&a(i[0][0],i[0][1])}}function io(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof we=="function"?we(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(s){return new Promise(function(a,c){s=e[i](s),n(a,c,s.done,s.value)})}}function n(i,s,a,c){Promise.resolve(c).then(function(p){i({value:p,done:a})},s)}}function k(e){return typeof e=="function"}function at(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var Rt=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function De(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Ie=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var s=this._parentage;if(s)if(this._parentage=null,Array.isArray(s))try{for(var a=we(s),c=a.next();!c.done;c=a.next()){var p=c.value;p.remove(this)}}catch(b){t={error:b}}finally{try{c&&!c.done&&(r=a.return)&&r.call(a)}finally{if(t)throw t.error}}else s.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(b){i=b instanceof Rt?b.errors:[b]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=we(f),d=u.next();!d.done;d=u.next()){var v=d.value;try{ao(v)}catch(b){i=i!=null?i:[],b instanceof Rt?i=D(D([],N(i)),N(b.errors)):i.push(b)}}}catch(b){o={error:b}}finally{try{d&&!d.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new Rt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)ao(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&De(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&De(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var gr=Ie.EMPTY;function Pt(e){return e instanceof Ie||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function ao(e){k(e)?e():e.unsubscribe()}var Ae={onUnhandledError:null,onStoppedNotification:null,Promise:void 
0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,s=n.isStopped,a=n.observers;return i||s?gr:(this.currentObservers=null,a.push(r),new Ie(function(){o.currentObservers=null,De(a,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,s=o.isStopped;n?r.error(i):s&&r.complete()},t.prototype.asObservable=function(){var r=new P;return r.source=this,r},t.create=function(r,o){return new ho(r,o)},t}(P);var ho=function(e){ie(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:gr},t}(x);var yt={now:function(){return(yt.delegate||Date).now()},delegate:void 0};var Et=function(e){ie(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=yt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var 
o=this,n=o.isStopped,i=o._buffer,s=o._infiniteTimeWindow,a=o._timestampProvider,c=o._windowTime;n||(i.push(r),!s&&i.push(a.now()+c)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,s=n._buffer,a=s.slice(),c=0;c0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=lt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var s=r.actions;o!=null&&((i=s[s.length-1])===null||i===void 0?void 0:i.id)!==o&&(lt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(jt);var go=function(e){ie(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(Wt);var Oe=new go(vo);var L=new P(function(e){return e.complete()});function Ut(e){return e&&k(e.schedule)}function Or(e){return e[e.length-1]}function Qe(e){return k(Or(e))?e.pop():void 0}function Me(e){return Ut(Or(e))?e.pop():void 0}function Nt(e,t){return typeof Or(e)=="number"?e.pop():t}var mt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Dt(e){return k(e==null?void 0:e.then)}function Vt(e){return k(e[pt])}function zt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function qt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Pi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Kt=Pi();function Qt(e){return k(e==null?void 0:e[Kt])}function Yt(e){return no(this,arguments,function(){var r,o,n,i;return $t(this,function(s){switch(s.label){case 0:r=e.getReader(),s.label=1;case 1:s.trys.push([1,,9,10]),s.label=2;case 2:return[4,Ze(r.read())];case 3:return o=s.sent(),n=o.value,i=o.done,i?[4,Ze(void 0)]:[3,5];case 4:return[2,s.sent()];case 5:return[4,Ze(n)];case 6:return[4,s.sent()];case 7:return s.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Bt(e){return k(e==null?void 0:e.getReader)}function I(e){if(e instanceof P)return e;if(e!=null){if(Vt(e))return Ii(e);if(mt(e))return Fi(e);if(Dt(e))return ji(e);if(zt(e))return xo(e);if(Qt(e))return Wi(e);if(Bt(e))return Ui(e)}throw qt(e)}function Ii(e){return new P(function(t){var r=e[pt]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Fi(e){return new P(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?M(function(n,i){return e(n,i,o)}):ue,xe(1),r?He(t):Io(function(){return new Jt}))}}function Fo(){for(var e=[],t=0;t=2,!0))}function le(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new x}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,s=i===void 0?!0:i,a=e.resetOnRefCountZero,c=a===void 0?!0:a;return function(p){var l,f,u,d=0,v=!1,b=!1,z=function(){f==null||f.unsubscribe(),f=void 0},K=function(){z(),l=u=void 0,v=b=!1},G=function(){var C=l;K(),C==null||C.unsubscribe()};return g(function(C,it){d++,!b&&!v&&z();var Ne=u=u!=null?u:r();it.add(function(){d--,d===0&&!b&&!v&&(f=Hr(G,c))}),Ne.subscribe(it),!l&&d>0&&(l=new tt({next:function(Pe){return 
Ne.next(Pe)},error:function(Pe){b=!0,z(),f=Hr(K,n,Pe),Ne.error(Pe)},complete:function(){v=!0,z(),f=Hr(K,s),Ne.complete()}}),I(C).subscribe(l))})(p)}}function Hr(e,t){for(var r=[],o=2;oe.next(document)),e}function q(e,t=document){return Array.from(t.querySelectorAll(e))}function W(e,t=document){let r=ce(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function ce(e,t=document){return t.querySelector(e)||void 0}function Re(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}var na=_(h(document.body,"focusin"),h(document.body,"focusout")).pipe(ke(1),V(void 0),m(()=>Re()||document.body),J(1));function Zt(e){return na.pipe(m(t=>e.contains(t)),X())}function Je(e){return{x:e.offsetLeft,y:e.offsetTop}}function No(e){return _(h(window,"load"),h(window,"resize")).pipe(Ce(0,Oe),m(()=>Je(e)),V(Je(e)))}function er(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return _(h(e,"scroll"),h(window,"resize")).pipe(Ce(0,Oe),m(()=>er(e)),V(er(e)))}function Do(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Do(e,r)}function T(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Do(o,n);return o}function tr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function ht(e){let t=T("script",{src:e});return H(()=>(document.head.appendChild(t),_(h(t,"load"),h(t,"error").pipe(E(()=>Mr(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),A(()=>document.head.removeChild(t)),xe(1))))}var Vo=new x,ia=H(()=>typeof ResizeObserver=="undefined"?ht("https://unpkg.com/resize-observer-polyfill"):j(void 0)).pipe(m(()=>new ResizeObserver(e=>{for(let t of 
e)Vo.next(t)})),E(e=>_(Ve,j(e)).pipe(A(()=>e.disconnect()))),J(1));function he(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ye(e){return ia.pipe(w(t=>t.observe(e)),E(t=>Vo.pipe(M(({target:r})=>r===e),A(()=>t.unobserve(e)),m(()=>he(e)))),V(he(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function zo(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var qo=new x,aa=H(()=>j(new IntersectionObserver(e=>{for(let t of e)qo.next(t)},{threshold:0}))).pipe(E(e=>_(Ve,j(e)).pipe(A(()=>e.disconnect()))),J(1));function rr(e){return aa.pipe(w(t=>t.observe(e)),E(t=>qo.pipe(M(({target:r})=>r===e),A(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function Ko(e,t=16){return dt(e).pipe(m(({y:r})=>{let o=he(e),n=bt(e);return r>=n.height-o.height-t}),X())}var or={drawer:W("[data-md-toggle=drawer]"),search:W("[data-md-toggle=search]")};function Qo(e){return or[e].checked}function Ke(e,t){or[e].checked!==t&&or[e].click()}function We(e){let t=or[e];return h(t,"change").pipe(m(()=>t.checked),V(t.checked))}function sa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function ca(){return _(h(window,"compositionstart").pipe(m(()=>!0)),h(window,"compositionend").pipe(m(()=>!1))).pipe(V(!1))}function Yo(){let e=h(window,"keydown").pipe(M(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:Qo("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),M(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!sa(o,r)}return!0}),le());return ca().pipe(E(t=>t?L:e))}function pe(){return new URL(location.href)}function ot(e,t=!1){if(te("navigation.instant")&&!t){let r=T("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function Bo(){return new 
x}function Go(){return location.hash.slice(1)}function nr(e){let t=T("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function pa(e){return _(h(window,"hashchange"),e).pipe(m(Go),V(Go()),M(t=>t.length>0),J(1))}function Jo(e){return pa(e).pipe(m(t=>ce(`[id="${t}"]`)),M(t=>typeof t!="undefined"))}function Fr(e){let t=matchMedia(e);return Xt(r=>t.addListener(()=>r(t.matches))).pipe(V(t.matches))}function Xo(){let e=matchMedia("print");return _(h(window,"beforeprint").pipe(m(()=>!0)),h(window,"afterprint").pipe(m(()=>!1))).pipe(V(e.matches))}function jr(e,t){return e.pipe(E(r=>r?t():L))}function ir(e,t){return new P(r=>{let o=new XMLHttpRequest;o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network Error"))}),o.addEventListener("abort",()=>{r.error(new Error("Request aborted"))}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{t.progress$.next(n.loaded/n.total*100)}),t.progress$.next(5)),o.send()})}function Ue(e,t){return ir(e,t).pipe(E(r=>r.text()),m(r=>JSON.parse(r)),J(1))}function Zo(e,t){let r=new DOMParser;return ir(e,t).pipe(E(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),J(1))}function en(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function tn(){return _(h(window,"scroll",{passive:!0}),h(window,"resize",{passive:!0})).pipe(m(en),V(en()))}function rn(){return{width:innerWidth,height:innerHeight}}function on(){return h(window,"resize",{passive:!0}).pipe(m(rn),V(rn()))}function nn(){return B([tn(),on()]).pipe(m(([e,t])=>({offset:e,size:t})),J(1))}function ar(e,{viewport$:t,header$:r}){let o=t.pipe(ee("size")),n=B([o,r]).pipe(m(()=>Je(e)));return B([r,t,n]).pipe(m(([{height:i},{offset:s,size:a},{x:c,y:p}])=>({offset:{x:s.x-c,y:s.y-p+i},size:a})))}function la(e){return h(e,"message",t=>t.data)}function ma(e){let t=new 
x;return t.subscribe(r=>e.postMessage(r)),t}function an(e,t=new Worker(e)){let r=la(t),o=ma(t),n=new x;n.subscribe(o);let i=o.pipe(Z(),re(!0));return n.pipe(Z(),qe(r.pipe(Y(i))),le())}var fa=W("#__config"),vt=JSON.parse(fa.textContent);vt.base=`${new URL(vt.base,pe())}`;function me(){return vt}function te(e){return vt.features.includes(e)}function be(e,t){return typeof t!="undefined"?vt.translations[e].replace("#",t.toString()):vt.translations[e]}function Ee(e,t=document){return W(`[data-md-component=${e}]`,t)}function oe(e,t=document){return q(`[data-md-component=${e}]`,t)}function ua(e){let t=W(".md-typeset > :first-child",e);return h(t,"click",{once:!0}).pipe(m(()=>W(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function sn(e){if(!te("announce.dismiss")||!e.childElementCount)return L;if(!e.hidden){let t=W(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return H(()=>{let t=new x;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),ua(e).pipe(w(r=>t.next(r)),A(()=>t.complete()),m(r=>R({ref:e},r)))})}function da(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function cn(e,t){let r=new x;return r.subscribe(({hidden:o})=>{e.hidden=o}),da(e,t).pipe(w(o=>r.next(o)),A(()=>r.complete()),m(o=>R({ref:e},o)))}function ha(e,t){let r=H(()=>B([No(e),dt(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:s,height:a}=he(e);return{x:o-i.x+s/2,y:n-i.y+a/2}}));return Zt(e).pipe(E(o=>r.pipe(m(n=>({active:o,offset:n})),xe(+!o||1/0))))}function pn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return H(()=>{let i=new x,s=i.pipe(Z(),re(!0));return 
i.subscribe({next({offset:a}){e.style.setProperty("--md-tooltip-x",`${a.x}px`),e.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),rr(e).pipe(Y(s)).subscribe(a=>{e.toggleAttribute("data-md-visible",a)}),_(i.pipe(M(({active:a})=>a)),i.pipe(ke(250),M(({active:a})=>!a))).subscribe({next({active:a}){a?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Ce(16,Oe)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(Pr(125,Oe),M(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?e.style.setProperty("--md-tooltip-0",`${-a}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),h(n,"click").pipe(Y(s),M(a=>!(a.metaKey||a.ctrlKey))).subscribe(a=>{a.stopPropagation(),a.preventDefault()}),h(n,"mousedown").pipe(Y(s),ne(i)).subscribe(([a,{active:c}])=>{var p;if(a.button!==0||a.metaKey||a.ctrlKey)a.preventDefault();else if(c){a.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(p=Re())==null||p.blur()}}),r.pipe(Y(s),M(a=>a===o),ze(125)).subscribe(()=>e.focus()),ha(e,t).pipe(w(a=>i.next(a)),A(()=>i.complete()),m(a=>R({ref:e},a)))})}function Wr(e){return T("div",{class:"md-tooltip",id:e},T("div",{class:"md-tooltip__inner md-typeset"}))}function ln(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return T("aside",{class:"md-annotation",tabIndex:0},Wr(t),T("a",{href:r,class:"md-annotation__index",tabIndex:-1},T("span",{"data-md-annotation-id":e})))}else return T("aside",{class:"md-annotation",tabIndex:0},Wr(t),T("span",{class:"md-annotation__index",tabIndex:-1},T("span",{"data-md-annotation-id":e})))}function mn(e){return T("button",{class:"md-clipboard md-icon",title:be("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function Ur(e,t){let 
r=t&2,o=t&1,n=Object.keys(e.terms).filter(c=>!e.terms[c]).reduce((c,p)=>[...c,T("del",null,p)," "],[]).slice(0,-1),i=me(),s=new URL(e.location,i.base);te("search.highlight")&&s.searchParams.set("h",Object.entries(e.terms).filter(([,c])=>c).reduce((c,[p])=>`${c} ${p}`.trim(),""));let{tags:a}=me();return T("a",{href:`${s}`,class:"md-search-result__link",tabIndex:-1},T("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&T("div",{class:"md-search-result__icon md-icon"}),r>0&&T("h1",null,e.title),r<=0&&T("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(c=>{let p=a?c in a?`md-tag-icon md-tag--${a[c]}`:"md-tag-icon":"";return T("span",{class:`md-tag ${p}`},c)}),o>0&&n.length>0&&T("p",{class:"md-search-result__terms"},be("search.result.term.missing"),": ",...n)))}function fn(e){let t=e[0].score,r=[...e],o=me(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),s=r.findIndex(l=>l.scoreUr(l,1)),...c.length?[T("details",{class:"md-search-result__more"},T("summary",{tabIndex:-1},T("div",null,c.length>0&&c.length===1?be("search.result.more.one"):be("search.result.more.other",c.length))),...c.map(l=>Ur(l,1)))]:[]];return T("li",{class:"md-search-result__item"},p)}function un(e){return T("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>T("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?tr(r):r)))}function Nr(e){let t=`tabbed-control tabbed-control--${e}`;return T("div",{class:t,hidden:!0},T("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function dn(e){return T("div",{class:"md-typeset__scrollwrap"},T("div",{class:"md-typeset__table"},e))}function ba(e){let t=me(),r=new URL(`../${e.version}/`,t.base);return T("li",{class:"md-version__item"},T("a",{href:`${r}`,class:"md-version__link"},e.title))}function hn(e,t){return 
T("div",{class:"md-version"},T("button",{class:"md-version__current","aria-label":be("select.version")},t.title),T("ul",{class:"md-version__list"},e.map(ba)))}function va(e){return e.tagName==="CODE"?q(".c, .c1, .cm",e):[e]}function ga(e){let t=[];for(let r of va(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let s;for(;s=/(\(\d+\))(!)?/.exec(i.textContent);){let[,a,c]=s;if(typeof c=="undefined"){let p=i.splitText(s.index);i=p.splitText(a.length),t.push(p)}else{i.textContent=a,t.push(i);break}}}}return t}function bn(e,t){t.append(...Array.from(e.childNodes))}function sr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,s=new Map;for(let a of ga(t)){let[,c]=a.textContent.match(/\((\d+)\)/);ce(`:scope > li:nth-child(${c})`,e)&&(s.set(c,ln(c,i)),a.replaceWith(s.get(c)))}return s.size===0?L:H(()=>{let a=new x,c=a.pipe(Z(),re(!0)),p=[];for(let[l,f]of s)p.push([W(".md-typeset",f),W(`:scope > li:nth-child(${l})`,e)]);return o.pipe(Y(c)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of p)l?bn(f,u):bn(u,f)}),_(...[...s].map(([,l])=>pn(l,t,{target$:r}))).pipe(A(()=>a.complete()),le())})}function vn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return vn(t)}}function gn(e,t){return H(()=>{let r=vn(e);return typeof r!="undefined"?sr(r,e,t):L})}var yn=Ht(Vr());var xa=0;function En(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return En(t)}}function xn(e){return ye(e).pipe(m(({width:t})=>({scrollable:bt(e).width>t})),ee("scrollable"))}function wn(e,t){let{matches:r}=matchMedia("(hover)"),o=H(()=>{let n=new 
x;if(n.subscribe(({scrollable:s})=>{s&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")}),yn.default.isSupported()&&(e.closest(".copy")||te("content.code.copy")&&!e.closest(".no-copy"))){let s=e.closest("pre");s.id=`__code_${xa++}`,s.insertBefore(mn(s.id),e)}let i=e.closest(".highlight");if(i instanceof HTMLElement){let s=En(i);if(typeof s!="undefined"&&(i.classList.contains("annotate")||te("content.code.annotate"))){let a=sr(s,e,t);return xn(e).pipe(w(c=>n.next(c)),A(()=>n.complete()),m(c=>R({ref:e},c)),qe(ye(i).pipe(m(({width:c,height:p})=>c&&p),X(),E(c=>c?a:L))))}}return xn(e).pipe(w(s=>n.next(s)),A(()=>n.complete()),m(s=>R({ref:e},s)))});return te("content.lazy")?rr(e).pipe(M(n=>n),xe(1),E(()=>o)):o}function ya(e,{target$:t,print$:r}){let o=!0;return _(t.pipe(m(n=>n.closest("details:not([open])")),M(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(M(n=>n||!o),w(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Sn(e,t){return H(()=>{let r=new x;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),ya(e,t).pipe(w(o=>r.next(o)),A(()=>r.complete()),m(o=>R({ref:e},o)))})}var Tn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster 
rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node circle.state-end,.node 
circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var zr,wa=0;function Sa(){return typeof mermaid=="undefined"||mermaid instanceof Element?ht("https://unpkg.com/mermaid@9.4.3/dist/mermaid.min.js"):j(void 0)}function On(e){return e.classList.remove("mermaid"),zr||(zr=Sa().pipe(w(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Tn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),J(1))),zr.subscribe(()=>{e.classList.add("mermaid");let t=`__mermaid_${wa++}`,r=T("div",{class:"mermaid"}),o=e.textContent;mermaid.mermaidAPI.render(t,o,(n,i)=>{let s=r.attachShadow({mode:"closed"});s.innerHTML=n,e.replaceWith(r),i==null||i(s)})}),zr.pipe(m(()=>({ref:e})))}var Mn=T("table");function Ln(e){return e.replaceWith(Mn),Mn.replaceWith(dn(e)),j({ref:e})}function 
Ta(e){let t=q(":scope > input",e),r=t.find(o=>o.checked)||t[0];return _(...t.map(o=>h(o,"change").pipe(m(()=>W(`label[for="${o.id}"]`))))).pipe(V(W(`label[for="${r.id}"]`)),m(o=>({active:o})))}function _n(e,{viewport$:t}){let r=Nr("prev");e.append(r);let o=Nr("next");e.append(o);let n=W(".tabbed-labels",e);return H(()=>{let i=new x,s=i.pipe(Z(),re(!0));return B([i,ye(e)]).pipe(Ce(1,Oe),Y(s)).subscribe({next([{active:a},c]){let p=Je(a),{width:l}=he(a);e.style.setProperty("--md-indicator-x",`${p.x}px`),e.style.setProperty("--md-indicator-width",`${l}px`);let f=er(n);(p.xf.x+c.width)&&n.scrollTo({left:Math.max(0,p.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),B([dt(n),ye(n)]).pipe(Y(s)).subscribe(([a,c])=>{let p=bt(n);r.hidden=a.x<16,o.hidden=a.x>p.width-c.width-16}),_(h(r,"click").pipe(m(()=>-1)),h(o,"click").pipe(m(()=>1))).pipe(Y(s)).subscribe(a=>{let{width:c}=he(n);n.scrollBy({left:c*a,behavior:"smooth"})}),te("content.tabs.link")&&i.pipe(je(1),ne(t)).subscribe(([{active:a},{offset:c}])=>{let p=a.innerText.trim();if(a.hasAttribute("data-md-switching"))a.removeAttribute("data-md-switching");else{let l=e.offsetTop-c.y;for(let u of q("[data-tabs]"))for(let d of q(":scope > input",u)){let v=W(`label[for="${d.id}"]`);if(v!==a&&v.innerText.trim()===p){v.setAttribute("data-md-switching",""),d.click();break}}window.scrollTo({top:e.offsetTop-l});let f=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([p,...f])])}}),i.pipe(Y(s)).subscribe(()=>{for(let a of q("audio, video",e))a.pause()}),Ta(e).pipe(w(a=>i.next(a)),A(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(rt(ae))}function An(e,{viewport$:t,target$:r,print$:o}){return _(...q(".annotate:not(.highlight)",e).map(n=>gn(n,{target$:r,print$:o})),...q("pre:not(.mermaid) > 
code",e).map(n=>wn(n,{target$:r,print$:o})),...q("pre.mermaid",e).map(n=>On(n)),...q("table:not([class])",e).map(n=>Ln(n)),...q("details",e).map(n=>Sn(n,{target$:r,print$:o})),...q("[data-tabs]",e).map(n=>_n(n,{viewport$:t})))}function Oa(e,{alert$:t}){return t.pipe(E(r=>_(j(!0),j(!1).pipe(ze(2e3))).pipe(m(o=>({message:r,active:o})))))}function Cn(e,t){let r=W(".md-typeset",e);return H(()=>{let o=new x;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Oa(e,t).pipe(w(n=>o.next(n)),A(()=>o.complete()),m(n=>R({ref:e},n)))})}function Ma({viewport$:e}){if(!te("header.autohide"))return j(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Le(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),X()),o=We("search");return B([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),X(),E(n=>n?r:j(!1)),V(!1))}function kn(e,t){return H(()=>B([ye(e),Ma(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),X((r,o)=>r.height===o.height&&r.hidden===o.hidden),J(1))}function Hn(e,{header$:t,main$:r}){return H(()=>{let o=new x,n=o.pipe(Z(),re(!0));return o.pipe(ee("active"),Ge(t)).subscribe(([{active:i},{hidden:s}])=>{e.classList.toggle("md-header--shadow",i&&!s),e.hidden=s}),r.subscribe(o),t.pipe(Y(n),m(i=>R({ref:e},i)))})}function La(e,{viewport$:t,header$:r}){return ar(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=he(e);return{active:o>=n}}),ee("active"))}function $n(e,t){return H(()=>{let r=new x;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=ce(".md-content h1");return typeof o=="undefined"?L:La(o,t).pipe(w(n=>r.next(n)),A(()=>r.complete()),m(n=>R({ref:e},n)))})}function Rn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),X()),n=o.pipe(E(()=>ye(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),ee("bottom"))));return 
B([o,n,t]).pipe(m(([i,{top:s,bottom:a},{offset:{y:c},size:{height:p}}])=>(p=Math.max(0,p-Math.max(0,s-c,i)-Math.max(0,p+c-a)),{offset:s-i,height:p,active:s-i<=c})),X((i,s)=>i.offset===s.offset&&i.height===s.height&&i.active===s.active))}function _a(e){let t=__md_get("__palette")||{index:e.findIndex(r=>matchMedia(r.getAttribute("data-md-color-media")).matches)};return j(...e).pipe(se(r=>h(r,"change").pipe(m(()=>r))),V(e[Math.max(0,t.index)]),m(r=>({index:e.indexOf(r),color:{scheme:r.getAttribute("data-md-color-scheme"),primary:r.getAttribute("data-md-color-primary"),accent:r.getAttribute("data-md-color-accent")}})),J(1))}function Pn(e){let t=T("meta",{name:"theme-color"});document.head.appendChild(t);let r=T("meta",{name:"color-scheme"});return document.head.appendChild(r),H(()=>{let o=new x;o.subscribe(i=>{document.body.setAttribute("data-md-color-switching","");for(let[s,a]of Object.entries(i.color))document.body.setAttribute(`data-md-color-${s}`,a);for(let s=0;s{let i=Ee("header"),s=window.getComputedStyle(i);return r.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(a=>(+a).toString(16).padStart(2,"0")).join("")})).subscribe(i=>t.content=`#${i}`),o.pipe(Se(ae)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")});let n=q("input",e);return _a(n).pipe(w(i=>o.next(i)),A(()=>o.complete()),m(i=>R({ref:e},i)))})}function In(e,{progress$:t}){return H(()=>{let r=new x;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(w(o=>r.next({value:o})),A(()=>r.complete()),m(o=>({ref:e,value:o})))})}var qr=Ht(Vr());function Aa(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r}function Fn({alert$:e}){qr.default.isSupported()&&new P(t=>{new qr.default("[data-clipboard-target], 
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Aa(W(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(w(t=>{t.trigger.focus()}),m(()=>be("clipboard.copied"))).subscribe(e)}function Ca(e){if(e.length<2)return[""];let[t,r]=[...e].sort((n,i)=>n.length-i.length).map(n=>n.replace(/[^/]+$/,"")),o=0;if(t===r)o=t.length;else for(;t.charCodeAt(o)===r.charCodeAt(o);)o++;return e.map(n=>n.replace(t.slice(0,o),""))}function cr(e){let t=__md_get("__sitemap",sessionStorage,e);if(t)return j(t);{let r=me();return Zo(new URL("sitemap.xml",e||r.base)).pipe(m(o=>Ca(q("loc",o).map(n=>n.textContent))),de(()=>L),He([]),w(o=>__md_set("__sitemap",o,sessionStorage,e)))}}function jn(e){let t=W("[rel=canonical]",e);t.href=t.href.replace("//localhost:","//127.0.0.1");let r=new Map;for(let o of q(":scope > *",e)){let n=o.outerHTML;for(let i of["href","src"]){let s=o.getAttribute(i);if(s===null)continue;let a=new URL(s,t.href),c=o.cloneNode();c.setAttribute(i,`${a}`),n=c.outerHTML;break}r.set(n,o)}return r}function Wn({location$:e,viewport$:t,progress$:r}){let o=me();if(location.protocol==="file:")return L;let n=cr().pipe(m(l=>l.map(f=>`${new URL(f,o.base)}`))),i=h(document.body,"click").pipe(ne(n),E(([l,f])=>{if(!(l.target instanceof Element))return L;let u=l.target.closest("a");if(u===null)return L;if(u.target||l.metaKey||l.ctrlKey)return L;let d=new URL(u.href);return d.search=d.hash="",f.includes(`${d}`)?(l.preventDefault(),j(new URL(u.href))):L}),le());i.pipe(xe(1)).subscribe(()=>{let l=ce("link[rel=icon]");typeof l!="undefined"&&(l.href=l.href)}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),i.pipe(ne(t)).subscribe(([l,{offset:f}])=>{history.scrollRestoration="manual",history.replaceState(f,""),history.pushState(null,"",l)}),i.subscribe(e);let s=e.pipe(V(pe()),ee("pathname"),je(1),E(l=>ir(l,{progress$:r}).pipe(de(()=>(ot(l,!0),L))))),a=new DOMParser,c=s.pipe(E(l=>l.text()),E(l=>{let 
f=a.parseFromString(l,"text/html");for(let b of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...te("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let z=ce(b),K=ce(b,f);typeof z!="undefined"&&typeof K!="undefined"&&z.replaceWith(K)}let u=jn(document.head),d=jn(f.head);for(let[b,z]of d)z.getAttribute("rel")==="stylesheet"||z.hasAttribute("src")||(u.has(b)?u.delete(b):document.head.appendChild(z));for(let b of u.values())b.getAttribute("rel")==="stylesheet"||b.hasAttribute("src")||b.remove();let v=Ee("container");return Fe(q("script",v)).pipe(E(b=>{let z=f.createElement("script");if(b.src){for(let K of b.getAttributeNames())z.setAttribute(K,b.getAttribute(K));return b.replaceWith(z),new P(K=>{z.onload=()=>K.complete()})}else return z.textContent=b.textContent,b.replaceWith(z),L}),Z(),re(f))}),le());return h(window,"popstate").pipe(m(pe)).subscribe(e),e.pipe(V(pe()),Le(2,1),M(([l,f])=>l.pathname===f.pathname&&l.hash!==f.hash),m(([,l])=>l)).subscribe(l=>{var f,u;history.state!==null||!l.hash?window.scrollTo(0,(u=(f=history.state)==null?void 0:f.y)!=null?u:0):(history.scrollRestoration="auto",nr(l.hash),history.scrollRestoration="manual")}),e.pipe(Cr(i),V(pe()),Le(2,1),M(([l,f])=>l.pathname===f.pathname&&l.hash===f.hash),m(([,l])=>l)).subscribe(l=>{history.scrollRestoration="auto",nr(l.hash),history.scrollRestoration="manual",history.back()}),c.pipe(ne(e)).subscribe(([,l])=>{var f,u;history.state!==null||!l.hash?window.scrollTo(0,(u=(f=history.state)==null?void 0:f.y)!=null?u:0):nr(l.hash)}),t.pipe(ee("offset"),ke(100)).subscribe(({offset:l})=>{history.replaceState(l,"")}),c}var Dn=Ht(Nn());function Vn(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,s)=>`${i}${s}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new 
RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return s=>(0,Dn.default)(s).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function Mt(e){return e.type===1}function pr(e){return e.type===3}function zn(e,t){let r=an(e);return _(j(location.protocol!=="file:"),We("search")).pipe($e(o=>o),E(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:te("search.suggest")}}})),r}function qn({document$:e}){let t=me(),r=Ue(new URL("../versions.json",t.base)).pipe(de(()=>L)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:s,aliases:a})=>s===i||a.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),E(n=>h(document.body,"click").pipe(M(i=>!i.metaKey&&!i.ctrlKey),ne(o),E(([i,s])=>{if(i.target instanceof Element){let a=i.target.closest("a");if(a&&!a.target&&n.has(a.href)){let c=a.href;return!i.target.closest(".md-version")&&n.get(c)===s?L:(i.preventDefault(),j(c))}}return L}),E(i=>{let{version:s}=n.get(i);return cr(new URL(i)).pipe(m(a=>{let p=pe().href.replace(t.base,"");return a.includes(p.split("#")[0])?new URL(`../${s}/${p}`,t.base):new URL(i)}))})))).subscribe(n=>ot(n,!0)),B([r,o]).subscribe(([n,i])=>{W(".md-header__topic").appendChild(hn(n,i))}),e.pipe(E(()=>o)).subscribe(n=>{var s;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let a=((s=t.version)==null?void 0:s.default)||"latest";Array.isArray(a)||(a=[a]);e:for(let c of a)for(let p of n.aliases)if(new RegExp(c,"i").test(p)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let a of oe("outdated"))a.hidden=!1})}function Pa(e,{worker$:t}){let{searchParams:r}=pe();r.has("q")&&(Ke("search",!0),e.value=r.get("q"),e.focus(),We("search").pipe($e(i=>!i)).subscribe(()=>{let i=pe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=Zt(e),n=_(t.pipe($e(Mt)),h(e,"keyup"),o).pipe(m(()=>e.value),X());return 
B([n,o]).pipe(m(([i,s])=>({value:i,focus:s})),J(1))}function Kn(e,{worker$:t}){let r=new x,o=r.pipe(Z(),re(!0));B([t.pipe($e(Mt)),r],(i,s)=>s).pipe(ee("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(ee("focus")).subscribe(({focus:i})=>{i&&Ke("search",i)}),h(e.form,"reset").pipe(Y(o)).subscribe(()=>e.focus());let n=W("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),Pa(e,{worker$:t}).pipe(w(i=>r.next(i)),A(()=>r.complete()),m(i=>R({ref:e},i)),J(1))}function Qn(e,{worker$:t,query$:r}){let o=new x,n=Ko(e.parentElement).pipe(M(Boolean)),i=e.parentElement,s=W(":scope > :first-child",e),a=W(":scope > :last-child",e);We("search").subscribe(l=>a.setAttribute("role",l?"list":"presentation")),o.pipe(ne(r),$r(t.pipe($e(Mt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:s.textContent=f.length?be("search.result.none"):be("search.result.placeholder");break;case 1:s.textContent=be("search.result.one");break;default:let u=tr(l.length);s.textContent=be("search.result.other",u)}});let c=o.pipe(w(()=>a.innerHTML=""),E(({items:l})=>_(j(...l.slice(0,10)),j(...l.slice(10)).pipe(Le(4),Ir(n),E(([f])=>f)))),m(fn),le());return c.subscribe(l=>a.appendChild(l)),c.pipe(se(l=>{let f=ce("details",l);return typeof f=="undefined"?L:h(f,"toggle").pipe(Y(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(M(pr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),A(()=>o.complete()),m(l=>R({ref:e},l)))}function Ia(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=pe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function Yn(e,t){let r=new x,o=r.pipe(Z(),re(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(Y(o)).subscribe(n=>n.preventDefault()),Ia(e,t).pipe(w(n=>r.next(n)),A(()=>r.complete()),m(n=>R({ref:e},n)))}function Bn(e,{worker$:t,keyboard$:r}){let o=new 
x,n=Ee("search-query"),i=_(h(n,"keydown"),h(n,"focus")).pipe(Se(ae),m(()=>n.value),X());return o.pipe(Ge(i),m(([{suggest:a},c])=>{let p=c.split(/([\s-]+)/);if(a!=null&&a.length&&p[p.length-1]){let l=a[a.length-1];l.startsWith(p[p.length-1])&&(p[p.length-1]=l)}else p.length=0;return p})).subscribe(a=>e.innerHTML=a.join("").replace(/\s/g," ")),r.pipe(M(({mode:a})=>a==="search")).subscribe(a=>{switch(a.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(M(pr),m(({data:a})=>a)).pipe(w(a=>o.next(a)),A(()=>o.complete()),m(()=>({ref:e})))}function Gn(e,{index$:t,keyboard$:r}){let o=me();try{let n=zn(o.search,t),i=Ee("search-query",e),s=Ee("search-result",e);h(e,"click").pipe(M(({target:c})=>c instanceof Element&&!!c.closest("a"))).subscribe(()=>Ke("search",!1)),r.pipe(M(({mode:c})=>c==="search")).subscribe(c=>{let p=Re();switch(c.type){case"Enter":if(p===i){let l=new Map;for(let f of q(":first-child [href]",s)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}c.claim()}break;case"Escape":case"Tab":Ke("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof p=="undefined")i.focus();else{let l=[i,...q(":not(details) > [href], summary, details[open] [href]",s)],f=Math.max(0,(Math.max(0,l.indexOf(p))+l.length+(c.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}c.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(M(({mode:c})=>c==="global")).subscribe(c=>{switch(c.type){case"f":case"s":case"/":i.focus(),i.select(),c.claim();break}});let a=Kn(i,{worker$:n});return _(a,Qn(s,{worker$:n,query$:a})).pipe(qe(...oe("search-share",e).map(c=>Yn(c,{query$:a})),...oe("search-suggest",e).map(c=>Bn(c,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ve}}function Jn(e,{index$:t,location$:r}){return B([t,r.pipe(V(pe()),M(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>Vn(o.config)(n.searchParams.get("h"))),m(o=>{var s;let 
n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let a=i.nextNode();a;a=i.nextNode())if((s=a.parentElement)!=null&&s.offsetHeight){let c=a.textContent,p=o(c);p.length>c.length&&n.set(a,p)}for(let[a,c]of n){let{childNodes:p}=T("span",null,c);a.replaceWith(...Array.from(p))}return{ref:e,nodes:n}}))}function Fa(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return B([r,t]).pipe(m(([{offset:i,height:s},{offset:{y:a}}])=>(s=s+Math.min(n,Math.max(0,a-i))-n,{height:s,locked:a>=i+n})),X((i,s)=>i.height===s.height&&i.locked===s.locked))}function Kr(e,o){var n=o,{header$:t}=n,r=eo(n,["header$"]);let i=W(".md-sidebar__scrollwrap",e),{y:s}=Je(i);return H(()=>{let a=new x,c=a.pipe(Z(),re(!0)),p=a.pipe(Ce(0,Oe));return p.pipe(ne(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*s}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),p.pipe($e()).subscribe(()=>{for(let l of q(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=he(f);f.scrollTo({top:u-d/2})}}}),ge(q("label[tabindex]",e)).pipe(se(l=>h(l,"click").pipe(Se(ae),m(()=>l),Y(c)))).subscribe(l=>{let f=W(`[id="${l.htmlFor}"]`);W(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),Fa(e,r).pipe(w(l=>a.next(l)),A(()=>a.complete()),m(l=>R({ref:e},l)))})}function Xn(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return St(Ue(`${r}/releases/latest`).pipe(de(()=>L),m(o=>({version:o.tag_name})),He({})),Ue(r).pipe(de(()=>L),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),He({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ue(r).pipe(m(o=>({repositories:o.public_repos})),He({}))}}function Zn(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return 
Ue(r).pipe(de(()=>L),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),He({}))}function ei(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return Xn(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return Zn(r,o)}return L}var ja;function Wa(e){return ja||(ja=H(()=>{let t=__md_get("__source",sessionStorage);if(t)return j(t);if(oe("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return L}return ei(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>L),M(t=>Object.keys(t).length>0),m(t=>({facts:t})),J(1)))}function ti(e){let t=W(":scope > :last-child",e);return H(()=>{let r=new x;return r.subscribe(({facts:o})=>{t.appendChild(un(o)),t.classList.add("md-source__repository--active")}),Wa(e).pipe(w(o=>r.next(o)),A(()=>r.complete()),m(o=>R({ref:e},o)))})}function Ua(e,{viewport$:t,header$:r}){return ye(document.body).pipe(E(()=>ar(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ee("hidden"))}function ri(e,t){return H(()=>{let r=new x;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(te("navigation.tabs.sticky")?j({hidden:!1}):Ua(e,t)).pipe(w(o=>r.next(o)),A(()=>r.complete()),m(o=>R({ref:e},o)))})}function Na(e,{viewport$:t,header$:r}){let o=new Map,n=q("[href^=\\#]",e);for(let a of n){let c=decodeURIComponent(a.hash.substring(1)),p=ce(`[id="${c}"]`);typeof p!="undefined"&&o.set(a,p)}let i=r.pipe(ee("height"),m(({height:a})=>{let c=Ee("main"),p=W(":scope > :first-child",c);return a+.8*(p.offsetTop-c.offsetTop)}),le());return ye(document.body).pipe(ee("height"),E(a=>H(()=>{let c=[];return j([...o].reduce((p,[l,f])=>{for(;c.length&&o.get(c[c.length-1]).tagName>=f.tagName;)c.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return p.set([...c=[...c,l]].reverse(),u)},new Map))}).pipe(m(c=>new 
Map([...c].sort(([,p],[,l])=>p-l))),Ge(i),E(([c,p])=>t.pipe(kr(([l,f],{offset:{y:u},size:d})=>{let v=u+d.height>=Math.floor(a.height);for(;f.length;){let[,b]=f[0];if(b-p=u&&!v)f=[l.pop(),...f];else break}return[l,f]},[[],[...c]]),X((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([a,c])=>({prev:a.map(([p])=>p),next:c.map(([p])=>p)})),V({prev:[],next:[]}),Le(2,1),m(([a,c])=>a.prev.length{let i=new x,s=i.pipe(Z(),re(!0));if(i.subscribe(({prev:a,next:c})=>{for(let[p]of c)p.classList.remove("md-nav__link--passed"),p.classList.remove("md-nav__link--active");for(let[p,[l]]of a.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",p===a.length-1)}),te("toc.follow")){let a=_(t.pipe(ke(1),m(()=>{})),t.pipe(ke(250),m(()=>"smooth")));i.pipe(M(({prev:c})=>c.length>0),Ge(o.pipe(Se(ae))),ne(a)).subscribe(([[{prev:c}],p])=>{let[l]=c[c.length-1];if(l.offsetHeight){let f=zo(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=he(f);f.scrollTo({top:u-d/2,behavior:p})}}})}return te("navigation.tracking")&&t.pipe(Y(s),ee("offset"),ke(250),je(1),Y(n.pipe(je(1))),Tt({delay:250}),ne(i)).subscribe(([,{prev:a}])=>{let c=pe(),p=a[a.length-1];if(p&&p.length){let[l]=p,{hash:f}=new URL(l.href);c.hash!==f&&(c.hash=f,history.replaceState({},"",`${c}`))}else c.hash="",history.replaceState({},"",`${c}`)}),Na(e,{viewport$:t,header$:r}).pipe(w(a=>i.next(a)),A(()=>i.complete()),m(a=>R({ref:e},a)))})}function Da(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:s}})=>s),Le(2,1),m(([s,a])=>s>a&&a>0),X()),i=r.pipe(m(({active:s})=>s));return B([i,n]).pipe(m(([s,a])=>!(s&&a)),X(),Y(o.pipe(je(1))),re(!0),Tt({delay:250}),m(s=>({hidden:s})))}function ni(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new x,s=i.pipe(Z(),re(!0));return 
i.subscribe({next({hidden:a}){e.hidden=a,a?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(Y(s),ee("height")).subscribe(({height:a})=>{e.style.top=`${a+16}px`}),h(e,"click").subscribe(a=>{a.preventDefault(),window.scrollTo({top:0})}),Da(e,{viewport$:t,main$:o,target$:n}).pipe(w(a=>i.next(a)),A(()=>i.complete()),m(a=>R({ref:e},a)))}function ii({document$:e,tablet$:t}){e.pipe(E(()=>q(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),se(r=>h(r,"change").pipe(Rr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ne(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function Va(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function ai({document$:e}){e.pipe(E(()=>q("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),M(Va),se(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function si({viewport$:e,tablet$:t}){B([We("search"),t]).pipe(m(([r,o])=>r&&!o),E(r=>j(r).pipe(ze(r?400:100))),ne(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let 
t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function za(){return location.protocol==="file:"?ht(`${new URL("search/search_index.js",Qr.base)}`).pipe(m(()=>__index),J(1)):Ue(new URL("search/search_index.json",Qr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var nt=Uo(),_t=Bo(),gt=Jo(_t),Yr=Yo(),Te=nn(),lr=Fr("(min-width: 960px)"),pi=Fr("(min-width: 1220px)"),li=Xo(),Qr=me(),mi=document.forms.namedItem("search")?za():Ve,Br=new x;Fn({alert$:Br});var Gr=new x;te("navigation.instant")&&Wn({location$:_t,viewport$:Te,progress$:Gr}).subscribe(nt);var ci;((ci=Qr.version)==null?void 0:ci.provider)==="mike"&&qn({document$:nt});_(_t,gt).pipe(ze(125)).subscribe(()=>{Ke("drawer",!1),Ke("search",!1)});Yr.pipe(M(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=ce("link[rel=prev]");typeof t!="undefined"&&ot(t);break;case"n":case".":let r=ce("link[rel=next]");typeof r!="undefined"&&ot(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});ii({document$:nt,tablet$:lr});ai({document$:nt});si({viewport$:Te,tablet$:lr});var 
Xe=kn(Ee("header"),{viewport$:Te}),Lt=nt.pipe(m(()=>Ee("main")),E(e=>Rn(e,{viewport$:Te,header$:Xe})),J(1)),qa=_(...oe("consent").map(e=>cn(e,{target$:gt})),...oe("dialog").map(e=>Cn(e,{alert$:Br})),...oe("header").map(e=>Hn(e,{viewport$:Te,header$:Xe,main$:Lt})),...oe("palette").map(e=>Pn(e)),...oe("progress").map(e=>In(e,{progress$:Gr})),...oe("search").map(e=>Gn(e,{index$:mi,keyboard$:Yr})),...oe("source").map(e=>ti(e))),Ka=H(()=>_(...oe("announce").map(e=>sn(e)),...oe("content").map(e=>An(e,{viewport$:Te,target$:gt,print$:li})),...oe("content").map(e=>te("search.highlight")?Jn(e,{index$:mi,location$:_t}):L),...oe("header-title").map(e=>$n(e,{viewport$:Te,header$:Xe})),...oe("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?jr(pi,()=>Kr(e,{viewport$:Te,header$:Xe,main$:Lt})):jr(lr,()=>Kr(e,{viewport$:Te,header$:Xe,main$:Lt}))),...oe("tabs").map(e=>ri(e,{viewport$:Te,header$:Xe})),...oe("toc").map(e=>oi(e,{viewport$:Te,header$:Xe,main$:Lt,target$:gt})),...oe("top").map(e=>ni(e,{viewport$:Te,header$:Xe,main$:Lt,target$:gt})))),fi=nt.pipe(E(()=>Ka),qe(qa),J(1));fi.subscribe();window.document$=nt;window.location$=_t;window.target$=gt;window.keyboard$=Yr;window.viewport$=Te;window.tablet$=lr;window.screen$=pi;window.print$=li;window.alert$=Br;window.progress$=Gr;window.component$=fi;})(); +//# sourceMappingURL=bundle.aecac24b.min.js.map + diff --git a/assets/javascripts/bundle.aecac24b.min.js.map b/assets/javascripts/bundle.aecac24b.min.js.map new file mode 100644 index 000000000..b1534de53 --- /dev/null +++ b/assets/javascripts/bundle.aecac24b.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", 
"node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", 
"node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", 
"node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/sample.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", 
"node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", 
"src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", 
"src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? 
define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = 
factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box 
model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. 
https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if 
(self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} 
useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || 
exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener 
to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from 
https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = 
index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2023 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n 
mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n 
...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? 
f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. 
Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. 
A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. 
Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? []).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? 
[_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. 
It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. 
This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. 
Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { 
timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { 
reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | 
null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". 
Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): 
UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. 
This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). 
In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * 
}\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. 
To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: 
OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? 
Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. 
This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? 
function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. 
Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected 
_checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. 
So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // 
emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n 
},\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. 
`setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false 
&& this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. 
Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = 
actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. 
Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! 
: defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an