diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..dd84ea78 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,38 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Desktop (please complete the following information):** + - OS: [e.g. iOS] + - Browser [e.g. chrome, safari] + - Version [e.g. 22] + +**Smartphone (please complete the following information):** + - Device: [e.g. iPhone6] + - OS: [e.g. iOS8.1] + - Browser [e.g. stock browser, safari] + - Version [e.g. 22] + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..bbcbbe7d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/workflows/ci-sphinx.yml b/.github/workflows/ci-sphinx.yml index 81b1c8fd..426e792a 100644 --- a/.github/workflows/ci-sphinx.yml +++ b/.github/workflows/ci-sphinx.yml @@ -22,6 +22,8 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v3 + with: + submodules: recursive - name: Setup Poetry uses: snok/install-poetry@v1 - name: Poetry install docs dependencies @@ -29,7 +31,8 @@ jobs: poetry --version poetry config virtualenvs.in-project true #poetry config virtualenvs.create false - poetry install -E docs + poetry lock --no-update + poetry install -E "docs experiment" - name: Sphinx Build Check run: | source .venv/bin/activate @@ -45,9 +48,9 @@ jobs: echo Doc build success exit 0 fi - - name: Sphinx Link Check - run: | - source .venv/bin/activate - cd docs - make clean - make linkcheck + #- name: Sphinx Link Check + #run: | + #source .venv/bin/activate + #cd docs + #make clean + #make linkcheck diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d2251fc0..055562a3 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -42,7 +42,8 @@ jobs: - name: Install dependencies run: | poetry config virtualenvs.in-project true - poetry install + poetry lock --no-update + poetry install -E "docs experiment" # Test MiV-OS using pytest - name: Run tests run: | @@ -78,7 +79,8 @@ jobs: - name: Install dependencies run: | poetry config virtualenvs.in-project true - poetry install + poetry lock --no-update + poetry install -E "docs experiment" # Set environment variables for coverage test. 
Coverage test is done using python 3.8 - name: Run style checks run: | diff --git a/.gitignore b/.gitignore index 088ad4ab..e285c504 100644 --- a/.gitignore +++ b/.gitignore @@ -203,6 +203,7 @@ TODO.md *.png *.eps *.pdf +*.svg # data files *.dat @@ -234,3 +235,6 @@ outcmaes/* # csv files *.csv + +# Emacs backup: +*~ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..b67a52b0 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "docs/MiV-Shared-Docs"] + path = docs/MiV-Shared-Docs + url = https://github.com/GazzolaLab/MiV-Shared-Docs diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 88acd3e8..53152246 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,18 +1,26 @@ # .readthedocs.yaml version: 2 +submodules: + include: all + # Set the version of Python and other tools you might need build: os: ubuntu-20.04 - tools: {python: "3.10"} + tools: + python: "3.10" jobs: - pre_create_environment: - - asdf plugin add poetry - - asdf install poetry latest - - asdf global poetry latest - - poetry config virtualenvs.create false # Key - post_install: - - poetry install -E docs + post_create_environment: + - pip install --upgrade pip + +python: + install: + - method: pip + path: . + extra_requirements: + - docs + - experiment + system_packages: true # Build documentation in the docs/ directory with Sphinx sphinx: diff --git a/RELEASE.md b/RELEASE.md index 92ee76c9..0d338497 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,3 +1,29 @@ +# Release Note (version 0.2.1) + +## What's Changed + +* Test Cases + Bug fix binned_spiketrain by @Gauravu2 in https://github.com/GazzolaLab/MiV-OS/pull/93 +* Shift documentation theme to PyData by @skim0119 in https://github.com/GazzolaLab/MiV-OS/pull/94 +* Initial implementation of MiV HDF5-based data format by @iraikov in https://github.com/GazzolaLab/MiV-OS/pull/90 +* TTL event readout from OpenEphys dataset by @skim0119 in https://github.com/GazzolaLab/MiV-OS/pull/99 +* Add core datatypes: SpikeTrain by @skim0119 in https://github.com/GazzolaLab/MiV-OS/pull/102 +* Setup dependabot by @skim0119 in https://github.com/GazzolaLab/MiV-OS/pull/103 +* Documentation theme updated to `pydata-theme` + +## New Contributors + +* @iraikov made their first contribution in https://github.com/GazzolaLab/MiV-OS/pull/90 +* @dependabot made their first contribution in https://github.com/GazzolaLab/MiV-OS/pull/107 + +## Dependency Version Upgrade + +* build(deps-dev): bump flake8 from 4.0.1 to 5.0.4 by @dependabot in https://github.com/GazzolaLab/MiV-OS/pull/107 +* build(deps): bump codecov/codecov-action from 2 to 3 by @dependabot in https://github.com/GazzolaLab/MiV-OS/pull/105 +* build(deps): bump actions/setup-python from 2.2.2 to 4.2.0 by @dependabot in https://github.com/GazzolaLab/MiV-OS/pull/104 +* build(deps-dev): bump pylint from 2.14.5 to 2.15.0 by @dependabot in https://github.com/GazzolaLab/MiV-OS/pull/111 + +**Full Changelog**: https://github.com/GazzolaLab/MiV-OS/compare/v0.2.0...v0.2.1 + # Release Note (version 0.2.0) [Milestone](https://github.com/GazzolaLab/MiV-OS/issues/30) diff --git a/docs/.gitignore b/docs/.gitignore index 59df8bc6..7ca9e2a2 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1,2 +1,3 @@ _external_assets _assets +**/datasets diff --git a/docs/MiV-Shared-Docs b/docs/MiV-Shared-Docs new file mode 160000 index 00000000..0da03f29 --- /dev/null +++ b/docs/MiV-Shared-Docs @@ -0,0 +1 @@ +Subproject commit 0da03f292cd69545f338afeb307794c9093b54d6 diff --git a/docs/_templates/miv-switcher.html 
b/docs/_templates/miv-switcher.html new file mode 100644 index 00000000..052228d7 --- /dev/null +++ b/docs/_templates/miv-switcher.html @@ -0,0 +1,23 @@ + diff --git a/docs/api/index.rst b/docs/api/index.rst new file mode 100644 index 00000000..f7d34353 --- /dev/null +++ b/docs/api/index.rst @@ -0,0 +1,22 @@ +API Documentation +================= + +.. toctree:: + :maxdepth: 2 + :caption: API + + io + signal + sorting + statistics + causality + visualization + signal_generator + coding + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/api/io.rst b/docs/api/io.rst index 03f6b776..21656799 100644 --- a/docs/api/io.rst +++ b/docs/api/io.rst @@ -1,8 +1,26 @@ ************************************** -Data Managing Module (:mod:`miv.io`) +IO Module (:mod:`miv.io`) ************************************** +Python Data Managing +==================== + .. automodule:: miv.io.data .. automodule:: miv.io.binary :members: + +External Datafile (H5) +====================== + +.. automodule:: miv.io.file + :members: + +Serial Communication Helper +=========================== + +.. automodule:: miv.io.serial.arduino + :members: + +.. automodule:: miv.io.serial.stimjim + :members: diff --git a/docs/conf.py b/docs/conf.py index 09765c19..986cb812 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -15,8 +15,7 @@ import os import sys - -# import sphinx_rtd_theme +from urllib.request import urlretrieve sys.path.insert(0, os.path.abspath("../")) @@ -24,7 +23,7 @@ # -- Project information ----------------------------------------------------- -project = "Mind-in-Vitro MiV-OS" +project = "Mind-in-Vitro" copyright = "2022, GazzolaLab" author = "Gazzola Lab" @@ -40,13 +39,13 @@ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.autosectionlabel", + "sphinx.ext.mathjax", "sphinx_autodoc_typehints", #'sphinx.ext.napoleon', "sphinx.ext.viewcode", "sphinx_togglebutton", "sphinx_copybutton", - "sphinx_rtd_theme", - "sphinx.ext.mathjax", + "sphinxcontrib.mermaid", "numpydoc", # "myst_parser", # Moving onto jupyter-notebook style "myst_nb", @@ -97,23 +96,66 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
-html_theme = "sphinx_book_theme" +html_theme = "pydata_sphinx_theme" +html_logo = "MiV-Shared-Docs/_static/assets/logo1.svg" +html_favicon = html_logo +html_sourcelink_suffix = "" html_theme_options = { - "repository_url": "https://github.com/GazzolaLab/MiV-OS", - "use_repository_button": True, + "logo": { + # "link": "https://mindinvitro.illinois.edu/", + "text": "Open Software" + }, + # Navbar Configuration + "navbar_start": ["navbar-logo", "miv-switcher.html"], + "navbar_center": ["navbar-nav"], + # "navbar_end": ["navbar-icon-links"], + # Header Link + "external_links": [ + # {"name": "link-one-name", "url": "https://"}, + ], + "icon_links": [ + { + # Label for this link + "name": "GitHub", + "url": "https://github.com/GazzolaLab/MiV-OS", # required + # Icon class (if "type": "fontawesome"), or path to local image (if "type": "local") + # The type of image to be used (see below for details) + "icon": "fab fa-github-square", + "type": "fontawesome", + } + ], + # Sidebar Configuraion + "left_sidebar_end": [], + # Theme (https://help.farbox.com/pygments.html) + # "pygment_light_style": "default", + # "pygment_dark_style": "native", + "use_edit_page_button": True, } html_title = "MiV-OS" -# html_logo = "" -# pygments_style = "sphinx" + +html_context = { + "default_mode": "dark", + "github_user": "GazzolaLab", + "github_repo": "MiV-OS", + "github_version": "main", + "doc_path": "docs", +} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static", "_static/assets"] +html_static_path = ["_static"] html_css_files = ["css/*", "css/logo.css"] # -- Options for numpydoc --------------------------------------------------- numpydoc_show_class_members = False # -- Options for myst-nb --------------------------------------------------- -nb_execution_mode = "off" +nb_execution_mode = "cache" +myst_heading_anchors = 3 + +# External Data Download +# urlretrieve( # Logo image file +# "https://raw.githubusercontent.com/skim0119/MiV-OS/assets_logo/docs/_static/assets/logo1.svg", +# html_logo, +# ) diff --git a/docs/discussion/index.rst b/docs/discussion/index.rst new file mode 100644 index 00000000..f24ce72a --- /dev/null +++ b/docs/discussion/index.rst @@ -0,0 +1,15 @@ +Discussion +========== + +.. toctree:: + :maxdepth: 2 + :caption: Advanced/Discussion + + auto_channel_mask_demo + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/examples/spike_detection_rst.temp b/docs/examples/spike_detection_rst.temp deleted file mode 100644 index fb5bf965..00000000 --- a/docs/examples/spike_detection_rst.temp +++ /dev/null @@ -1,6 +0,0 @@ -Spike Detection Example -####################### - -.. 
literalinclude:: ../../examples/post_processing/spike_detection.py - :linenos: - :language: python diff --git a/docs/guide/burst_analysis.md b/docs/guide/burst_analysis.md index 9fbdcfb8..23cf2287 100644 --- a/docs/guide/burst_analysis.md +++ b/docs/guide/burst_analysis.md @@ -9,6 +9,9 @@ kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- # Burst Analysis diff --git a/docs/guide/channel_correlation.md b/docs/guide/channel_correlation.md index 43f7869c..0c2eacf4 100644 --- a/docs/guide/channel_correlation.md +++ b/docs/guide/channel_correlation.md @@ -9,6 +9,9 @@ kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- # Channel-wise Signal Correlation diff --git a/docs/guide/connectivity_network.md b/docs/guide/connectivity_network.md index 41ac97e0..5e281372 100644 --- a/docs/guide/connectivity_network.md +++ b/docs/guide/connectivity_network.md @@ -9,6 +9,9 @@ kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- # Channel-wise Signal Correlation @@ -59,7 +62,7 @@ with data.load() as (signal, timestamps, sampling_rate): spiketrains = spike_detection(signal, timestamps, sampling_rate) ``` -## Provide MEA mao +## Provide MEA map ```{code-cell} ipython3 #Matrix containing electrode numbers according to their spatial location @@ -93,12 +96,14 @@ Plots connectivity using the provided MEA map and connectivity matrix. Documenta ```{code-cell} ipython3 #Non-interactive connectivity plot using correlation -plot_connectivity(mea_map, corrcoef_matrix, False) +G = plot_connectivity(mea_map, corrcoef_matrix, False) +G.view() ``` ```{code-cell} ipython3 #Interactive connectivity plot using correlation -plot_connectivity_interactive(mea_map, corrcoef_matrix, False) +N = plot_connectivity_interactive(mea_map, corrcoef_matrix, False) +N.show("nodes.html") ``` diff --git a/docs/guide/data_structure/hdf5_based_data_format.md b/docs/guide/data_structure/hdf5_based_data_format.md new file mode 100644 index 00000000..d03a3d8c --- /dev/null +++ b/docs/guide/data_structure/hdf5_based_data_format.md @@ -0,0 +1,84 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.14.1 +kernelspec: + display_name: Python 3 + language: python + name: python3 +mystnb: + execution_mode: 'off' +--- + +# HDF5 Read + +```{code-cell} ipython3 +:tags: [hide-cell] + +# Import required modules +import os +import sys + +import matplotlib.cm as cm +import matplotlib.colors as colors +import matplotlib.pyplot as plt +import numpy as np +from sklearn.decomposition import PCA +from sklearn.mixture import GaussianMixture +from viziphant.rasterplot import rasterplot_rates + +import miv +from miv.io import DataManager +from miv.io import file as miv_file +from miv.signal.filter import ButterBandpass, FilterCollection, MedianFilter +from miv.signal.spike import PCADecomposition, SpikeSorting, ThresholdCutoff +from miv.statistics import firing_rates, signal_to_noise +from miv.visualization import extract_waveforms, plot_frequency_domain, plot_waveforms +``` + +## Read h5 + +```{code-cell} ipython3 +recording_id = "2022-03-10_16-19-09-Record Node 104-experiment1_spontaneous-recording1" +input_data, data_container = miv_file.read( + "2022-03-10_16-19-09/MiV_data.h5", groups=recording_id +) +``` + +## Data Access + +```{code-cell} ipython3 
+signal = input_data[f"{recording_id}/signal"] +timestamps = input_data[f"{recording_id}/timestamps"] +sampling_rate = input_data[f"{recording_id}/sampling_rate"][0] +num_channel = signal.shape[-1] +``` + +## Processing Example + +```{code-cell} ipython3 +# Set up filters, we use butter bandpass and median filters here. More details are here (https://miv-os.readthedocs.io/en/latest/api/signal.html) +signal_filter = ( + FilterCollection() + .append(ButterBandpass(300, 3000, order=4)) + .append(MedianFilter(threshold=60, k=20)) +) + +# Set threshold for Signal to noise ratio to detect spikes +spike_detection = ThresholdCutoff(cutoff=5) + +# Preprocess +signal = signal_filter(signal[0], sampling_rate) +spiketrains = spike_detection(signal, timestamps, sampling_rate) + +# Estimate Firing Rate +stat = firing_rates(spiketrains)["rates"] + +# Plot rasterplot using viziphant + +plt.figure(figsize=(24, 8)) +a, b, c = rasterplot_rates(spiketrains, ax=plt.gca()) +``` diff --git a/docs/guide/granger_causality_psd_cpsd_coherence.md b/docs/guide/granger_causality_psd_cpsd_coherence.md index b2c980ad..db936485 100644 --- a/docs/guide/granger_causality_psd_cpsd_coherence.md +++ b/docs/guide/granger_causality_psd_cpsd_coherence.md @@ -9,6 +9,9 @@ kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- # Connectivity Analysis diff --git a/docs/guide/index.rst b/docs/guide/index.rst new file mode 100644 index 00000000..6234f300 --- /dev/null +++ b/docs/guide/index.rst @@ -0,0 +1,37 @@ +How-To Guide +============ + +Here we provide number of example scripts using `MiV` tools. Some examples provide additional files or links to published paper for a complete description. Examples are written to serve as a starting template for customized usages. + +.. toctree:: + :maxdepth: 2 + :caption: Analysis + + channel_correlation + granger_causality_psd_cpsd_coherence + burst_analysis + info_theory + connectivity_network + lyon_ear_model + +.. toctree:: + :maxdepth: 2 + :caption: Experiment + + stimjim_spiketrain + +.. 
toctree:: + :maxdepth: 2 + :caption: Data / Data Structure + + sample_datasets + data_structure/hdf5_based_data_format + read_TTL_events + read_binary + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/guide/info_theory.md b/docs/guide/info_theory.md index bf4f1803..171e8473 100644 --- a/docs/guide/info_theory.md +++ b/docs/guide/info_theory.md @@ -9,6 +9,9 @@ kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- # Information Theory diff --git a/docs/guide/lyon_ear_model.md b/docs/guide/lyon_ear_model.md index ff2c56ae..49895a59 100644 --- a/docs/guide/lyon_ear_model.md +++ b/docs/guide/lyon_ear_model.md @@ -9,6 +9,9 @@ kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- # Temporal Encoding: Lyon Ear Model diff --git a/docs/guide/read_TTL_events.md b/docs/guide/read_TTL_events.md new file mode 100644 index 00000000..d0f3c804 --- /dev/null +++ b/docs/guide/read_TTL_events.md @@ -0,0 +1,59 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.14.1 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +# Read TTL Events in OpenEphys Data + +```{code-cell} ipython3 +:tags: [hide-cell] + +from miv.datasets.ttl_events import load_data +``` + +## Load example dataset + +TTL events are composed of five data types: states, full_words, timestamps, sampling rate, and initial state. + +- The details of what each data type represents are described [here](https://open-ephys.github.io/gui-docs/User-Manual/Recording-data/Binary-format.html#events). +- The API documentation is [here](https://miv-os.readthedocs.io/en/latest/api/io.html). + +> Note: The timestamps data are already synchronized with other recording streams. To match the time, make sure to turn off `start_at_zero` when loading the signal. + +```{code-cell} ipython3 +:tags: [hide-cell] + +datasets = load_data() +``` + +```{code-cell} ipython3 +data = datasets[0] +states, full_words, timestamps, sampling_rate, initial_state = data.load_ttl_event() +``` + +## Visualize TTL Events + +```{code-cell} ipython3 +:tags: [hide-cell] + +import numpy as np +import matplotlib.pyplot as plt +``` + +```{code-cell} ipython3 +on = timestamps[states == 1] +off = timestamps[states == -1] + +for start, end in zip(on, off): + plt.axvspan(start, end, alpha=0.4, color='red') +plt.title("TTL ON/OFF Events") +plt.xlabel("time (s)") +``` diff --git a/docs/guide/read_binary.md b/docs/guide/read_binary.md new file mode 100644 index 00000000..17dbfc1a --- /dev/null +++ b/docs/guide/read_binary.md @@ -0,0 +1,83 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' +--- + +# Read Raw Binary File + +Some interfaces and recording devices save data in raw binary format. Here, we provide a method for importing a raw binary signal. + +We assume the data is saved as a row-major (C-order) matrix, where the shape of the matrix is (num_sample, num_channel). 
+For example, a binary file with N samples and M channels is saved in the order (sample 1 from channel 1), (sample 1 from channel 2), ..., (sample 1 of channel M), (sample 2 of channel 1), ..., (sample N of channel M). + +> This method can be used to load a large data sample that cannot fit in RAM at once. In such cases, be careful to process the data in segments. +> When reading the data directly from a binary file, it is the user's responsibility to convert the binary data to voltage. + +```{code-cell} ipython3 +:tags: [hide-cell] + +import os, sys +import numpy as np + +from tqdm import tqdm + +from miv.io import load_continuous_data +``` + +## Import Data + +To read the raw binary file, provide the file path, the number of channels, and the sampling rate: + +```{code-cell} ipython3 +file_path: str = "" +num_channels: int = 512 +sampling_rate: int = 20_000 +raw_signal, timestamps = load_continuous_data(file_path, num_channels=num_channels, sampling_rate=sampling_rate) +print(raw_signal.shape) # (num_samples, num_channels) +``` + +## Prepare Spike Detection + +For demonstration, let's use a simple bandpass filter with threshold detection to get spikestamps. + +```{code-cell} ipython3 +:tags: [hide-cell] + +from miv.signal.filter import ButterBandpass +from miv.signal.spike import ThresholdCutoff +``` + +```{code-cell} ipython3 +pre_filter = ButterBandpass(lowcut=300, highcut=3000, order=5) +spike_detection = ThresholdCutoff() +``` + +## Get SpikeTrain + +To iterate over segments of a large dataset, it is recommended to use `np.array_split`, which returns array `view`s of each segment. + +```{code-cell} ipython3 +total_spiketrain = None +n_split = 1000 + +raw_signal_split = np.array_split(raw_signal, n_split, axis=0) +timestamps_split = np.array_split(timestamps, n_split) +for signal_seg, timestamps_seg in tqdm(zip(raw_signal_split, timestamps_split)): + filtered_signal = pre_filter(signal_seg, sampling_rate=sampling_rate) + spks = spike_detection(filtered_signal, timestamps_seg, sampling_rate=sampling_rate, progress_bar=False) + if total_spiketrain is not None: + total_spiketrain.merge(spks) + else: + total_spiketrain = spks +``` diff --git a/docs/examples/sample_datasets.rst b/docs/guide/sample_datasets.rst similarity index 100% rename from docs/examples/sample_datasets.rst rename to docs/guide/sample_datasets.rst diff --git a/docs/guide/stimjim_spiketrain.md b/docs/guide/stimjim_spiketrain.md new file mode 100644 index 00000000..231c24dd --- /dev/null +++ b/docs/guide/stimjim_spiketrain.md @@ -0,0 +1,158 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +# Send Spiketrain to Stimjim + + +Throughout the guide, we use the `PySerial` package for serial communication with Stimjim (Arduino base). 
+This dependency is not included as part of `MiV-OS`, hence user must install it separately: +```bash +pip install pyserial +``` + +```{code-cell} ipython3 +:tags: [hide-cell] + +import time +from miv.io.serial import StimjimSerial +``` + +## Open Connection + ++++ + +- Baudrate: [here](https://bitbucket.org/natecermak/stimjim/src/c23d98eb90725888241dedc7cab83cacd2bb288e/stimjimPulser/stimjimPulser.ino#lines-246) +- Listing ports: `python -m serial.tools.list_ports` + - Also you can run as: + ```py + from miv.io.serial import list_serial_ports + list_serial_ports() + ``` + +```{code-cell} ipython3 +stimjim = StimjimSerial(port="COM3") +# stimjim.connect() +``` + +## Serial Communication Protocol + +The module `ArduionSerial` provides basic operations: `send` and `wait`. +- `send`: Send string message through serial port. +- `wait`: Wait until the buffer is returned. Receive reply from Arduino(Stimjim) device. + +```{raw-cell} +count = 0 +max_iteration = 100 +prevTime = time.time() +for i in range(max_iteration): + # check for a reply from board + reply = "" + stimjim.send(f"Test Message: {count}") + msg = stimjim.wait() + print (f"Time {time.time()}") + print(msg) + + + prevTime = time.time() + count += 1 +``` + +> Check if `.is_open` returns `True` before running complex stimulation. +> If it refuse to open communication port due to `Permission` issue, check if `Serial Monitor` is opened on `Arduino IDE` (Or check for any other serial connection). + ++++ + +## Example Stimulation Run + +```{raw-cell} +def write_and_read(s): + arduino.write(bytes(s, 'utf-8')) + time.sleep(10) + data = ser.readlines() + return data +``` + +```{raw-cell} +stimjim.send('D\n') +msg = stimjim.wait() +print(msg) +``` + +```{raw-cell} +cmd = "S0,0,3,100000,1000000;4500,0,50000\n" +``` + +## Generate Musical Pitch + +```{code-cell} ipython3 +from math import log2, pow +import IPython +import numpy as np +import matplotlib.pyplot as plt +from scipy.io import wavfile +import scipy.signal +from miv.coding.temporal import LyonEarModel, BensSpikerAlgorithm +``` + +```{code-cell} ipython3 +A4 = 440 +C0 = A4*pow(2, -4.75) +name = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"] + +def pitch(freq): + h = round(12*log2(freq/C0)) + octave = h // 12 + n = h % 12 + return name[n] + str(octave) +``` + +### Ben's Spiker Algorithm + +```{code-cell} ipython3 +frequency = 4 +sampling_rate = 40 +print(pitch(frequency)) +``` + +```{code-cell} ipython3 +t = np.linspace(0,2*np.pi / frequency, int(sampling_rate / (1.0 / (2*np.pi/frequency)))) +y = np.sin(frequency * t) +``` + +```{code-cell} ipython3 +bsa = BensSpikerAlgorithm(int(sampling_rate / (1.0 / (2*np.pi/frequency))), threshold=1.1, normalize=True) +spikes, timestamps = bsa(y[:,None]) +events = np.where(spikes)[0] +spiketrain = t[events] +``` + +```{code-cell} ipython3 +plt.eventplot(spiketrain) +plt.plot(t,y, 'r') +plt.xlabel('time') +plt.ylabel('y') +plt.show() +``` + +```{code-cell} ipython3 +spiketrain_micro = (spiketrain * 1e6).astype(np.int_) +t_max = int(1e6 * 2 * np.pi / frequency) +``` + +```{code-cell} ipython3 +stimjim.send_spiketrain(0, spiketrain_micro, t_max, int(1e6 * 60)) +``` + +```{code-cell} ipython3 + +``` diff --git a/docs/index.rst b/docs/index.rst index ea9b1f4d..d92bdc09 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,19 +1,9 @@ -.. MiV-OS documentation master file, created by - sphinx-quickstart on Thu Mar 24 23:35:49 2022. - -MiV-OS documentation! 
-===================== +Mind-in-Vitro Open Software +=========================== **MiV-OS** is a *free* and *open-source* software project for the post-processing and analysis of **mind-in-vitro** project. -The software is developed and maintained by the Gazzola Lab at the University of Illinois at Urbana-Champaign. For more information on the projects and what we work on, visit our `website `_. - -Installation Instruction ------------------------- - -:code:`pip install MiV-OS` - -You can also download the source code from `GitHub `_ directly. +The software is developed and maintained by the Gazzola Lab at the University of Illinois at Urbana-Champaign. For more information on the projects and what we work on, visit our `website `_. Contribution ------------ @@ -26,54 +16,11 @@ If you are interested in contributing to this project, we prepared contribution :maxdepth: 2 :caption: Overview - overview/about - overview/references - Contribution - -.. toctree:: - :maxdepth: 2 - :caption: User Guide - - guide/signal_processing - guide/spike_cutout - guide/spike_sorting - guide/channel_correlation - guide/granger_causality_psd_cpsd_coherence - guide/burst_analysis - guide/info_theory - guide/connectivity_network - guide/lyon_ear_model - -.. toctree:: - :maxdepth: 2 - :caption: API - - api/io - api/signal - api/sorting - api/statistics - api/causality - api/visualization - api/signal_generator - api/coding - -.. toctree:: - :maxdepth: 2 - :caption: Examples - - examples/sample_datasets - -.. toctree:: - :maxdepth: 2 - :caption: Advanced/Discussion - - discussion/auto_channel_mask_demo - -.. toctree:: - :maxdepth: 2 - :caption: Site - - Gazzola Lab + MiV-Shared-Docs/overview/index + tutorial/index + guide/index + api/index + discussion/index Indices and tables ================== diff --git a/docs/overview/about.md b/docs/overview/about.md deleted file mode 100644 index 800ce4a7..00000000 --- a/docs/overview/about.md +++ /dev/null @@ -1,15 +0,0 @@ -# About - -`MiV-OS` is a open-source software project to provide modular framework for operating neuromorphic computation. - -## Related Projects - -By default, the installation only includes basic spike analysis tools. Further plugins, such as high-performance-computing (HPC) analysis or neural simulator, are still under development. - -| Name | Planed | Description | Repo | Current Status | -| ---------------- | --------------- | ------------------------------------------------------------ | ---------------- | -------------- | -| Visualizer/Interface | 2022 Q2 | Web-based interactive visualizer for spike analysis | private | In progress | -| Simulator | 2022 Q3 | Neuron emulator | private | In progress | -| Reservoir Computing | 2022 Q3 | Reservoir computing module for neuromorphic hardware/emulator | | | - -For more information on Mind *in Vitro*, see the project [website](https://mindinvitro.illinois.edu). diff --git a/docs/overview/references.rst b/docs/overview/references.rst deleted file mode 100644 index 8ba17921..00000000 --- a/docs/overview/references.rst +++ /dev/null @@ -1,44 +0,0 @@ -********** -References -********** - -We highly encourage users to explore and integrate other external libraries in -their project. -This documentation also contains series of tutorials and guidelines of how to -use other libraries with `MiV-OS` tools. -If you are having trouble in integrating tools or data structure from other -packages, feel free to make a suggestion on our `GitHub-issue `_. 
- -Below are the list of external libraries that provides wide support for advanced -spike processing. -Some of the packages are essential dependencies for our project as well. - -`Neural Ensemble `_ -################################################ - -- Python-Neo [1]_ -- Elephant/Viziphant [2]_ - -Algorithm -######### - -- PyWavelets [3]_ - -Hardware -######## - -- `Open Ephys `_ - -Other Reads -########### - -- `Neural Data Analysis - Mark Kramer `_ -- `Data Science for Psychology and Neuroscience — in Python `_ - ---------------- - -.. [1] Garcia S., Guarino D., Jaillet F., Jennings T.R., Pröpper R., Rautenberg P.L., Rodgers C., Sobolev A.,Wachtler T., Yger P. and Davison A.P. (2014) Neo: an object model for handling electrophysiology data in multiple formats. Frontiers in Neuroinformatics 8:10: doi:10.3389/fninf.2014.00010 - -.. [2] Denker M, Yegenoglu A, Grün S (2018) Collaborative HPC-enabled workflows on the HBP Collaboratory using the Elephant framework. Neuroinformatics 2018, P19. doi:10.12751/incf.ni2018.0019 - -.. [3] Gregory R. Lee, Ralf Gommers, Filip Wasilewski, Kai Wohlfahrt, Aaron O’Leary (2019). PyWavelets: A Python package for wavelet analysis. Journal of Open Source Software, 4(36), 1237, https://doi.org/10.21105/joss.01237. diff --git a/docs/tutorial/index.rst b/docs/tutorial/index.rst new file mode 100644 index 00000000..a3aa9c57 --- /dev/null +++ b/docs/tutorial/index.rst @@ -0,0 +1,24 @@ +Tutorial +======== + +.. toctree:: + :maxdepth: 2 + :caption: How to Start + + installation + +.. toctree:: + :maxdepth: 2 + :caption: Good Exercises + + signal_processing + spike_cutout + spike_sorting + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/tutorial/installation.rst b/docs/tutorial/installation.rst new file mode 100644 index 00000000..07c6a9b4 --- /dev/null +++ b/docs/tutorial/installation.rst @@ -0,0 +1,21 @@ +Installation +============ + +Instruction +----------- + +:code:`pip install MiV-OS` + +You can also download the source code from `GitHub `_ directly. + +Requirements +------------ + +Before installing `MiV-OS`, please ensure you have the following installed: + +- Python 3 (3.8+ are supported) + +Troubleshooting +--------------- + +If you have any issues during the installation please post a `GitHub-issue `_ with the details. diff --git a/docs/guide/signal_processing.md b/docs/tutorial/signal_processing.md similarity index 82% rename from docs/guide/signal_processing.md rename to docs/tutorial/signal_processing.md index 22a9fe1b..161f1076 100644 --- a/docs/guide/signal_processing.md +++ b/docs/tutorial/signal_processing.md @@ -4,14 +4,19 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.13.8 + jupytext_version: 1.14.1 kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- -# Signal Processing Guideline +# Introduction : Quick Start + +Here is a quick-start example of how to start using `MiV-OS`. ```{code-cell} ipython3 :tags: [hide-cell] @@ -20,10 +25,11 @@ import os import numpy as np import quantities as pq import matplotlib.pyplot as plt - ``` -## 1. Data Load +## 1. Data Load (Open Ephys) + +We natively support the output file-structure that `OpenEphys` uses. 
```{code-cell} ipython3 :tags: [hide-cell] @@ -33,11 +39,15 @@ from miv.io.data import Data, DataManager ```{code-cell} ipython3 # Load dataset from OpenEphys recording -folder_path: str = "~/Open Ephys/2022-03-10-16-19-09" # Data Path +folder_path: str = "2022-03-10_16-19-09/" # Data Path # Provide the path of experimental recording tree to the DataManager class dataset = DataManager(folder_path) ``` +```{code-cell} ipython3 +dataset.tree() +``` + You should be able to check the data structure by running `dataset.tree()`. @@ -45,7 +55,7 @@ You should be able to check the data structure by running `dataset.tree()`. +++ -We provide a set of basic signal filter tools. It is highly recommended to filter the signal before doing the spike-detection. +We provide a set of basic signal filter tools. Here, we provide examples of how to create and apply the filter to the [`dataset`](../api/io.rst). +++ @@ -60,8 +70,7 @@ from miv.signal.filter import FilterCollection, ButterBandpass ### 2.1 Filter Collection -[Here](api/signal:signal filter) is the list of provided filters. -All filters are `Callable`, taking `signal` and `sampling_rate` as parameters. +All filters are directly `Callable`, taking `signal` and `sampling_rate` as parameters. To define a multiple filters together, we provide [`FilterCollection`](miv.signal.filter.FilterCollection) that execute multiple filters in a series. ```{code-cell} ipython3 @@ -91,16 +100,10 @@ You can check the list of all provided filters [here](../api/signal). # Apply filter to `dataset[0]` with dataset[0].load() as (signal, timestamps, sampling_rate): filtered_signal = pre_filter(signal, sampling_rate) - -# Apply filter to array -rate = 30_000 -filtered_signal = pre_filter(data_array, sampling_rate=rate) ``` ## 3. Spike Detection -You can check the available method [here](api/signal:spike detection). - Most simple example of spike-detection method is using `ThresholdCutoff`. 
```{code-cell} ipython3 @@ -117,7 +120,6 @@ spike_detection = ThresholdCutoff() # signal : np.array or neo.core.AnalogSignal, shape(N_channels, N) # timestamps : np.array, shape(N) # sampling_rate : float -timestamps = spike_detection(signal, timestamps, sampling_rate=30_000, cutoff=3.5) # The detection can be applied on the dataset spiketrains = spike_detection(filtered_signal, timestamps, sampling_rate) @@ -134,5 +136,5 @@ from viziphant.rasterplot import rasterplot_rates ```{code-cell} ipython3 # Plot -rasterplot_rates(spiketrain_list) +rasterplot_rates(spiketrains) ``` diff --git a/docs/guide/spike_cutout.md b/docs/tutorial/spike_cutout.md similarity index 96% rename from docs/guide/spike_cutout.md rename to docs/tutorial/spike_cutout.md index b29c321d..1b69cee2 100644 --- a/docs/guide/spike_cutout.md +++ b/docs/tutorial/spike_cutout.md @@ -9,6 +9,9 @@ kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- # Spike Cutout Visualization diff --git a/docs/guide/spike_sorting.md b/docs/tutorial/spike_sorting.md similarity index 98% rename from docs/guide/spike_sorting.md rename to docs/tutorial/spike_sorting.md index a627244a..655be8df 100644 --- a/docs/guide/spike_sorting.md +++ b/docs/tutorial/spike_sorting.md @@ -9,9 +9,12 @@ kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 +file_format: mystnb +mystnb: + execution_mode: 'off' --- -# +# Spike Sorting ## References diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index 2769864f..00000000 --- a/examples/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# Code Examples - -This directory contains number of examples usage of `MiV-OS`. - -## Post Processing - -Some examples provide additional files or links to published paper for a complete description. -Examples can serve as a starting template for customized usages. - -* [spike-detection](./post_processing/spike_detection.py) - * __Purpose__: Demonstrate basic use case to read the raw data and process spike-detection. 
- * __Features__: diff --git a/examples/post_processing/spike_detection.py b/examples/post_processing/spike_detection.py deleted file mode 100644 index eba6d54f..00000000 --- a/examples/post_processing/spike_detection.py +++ /dev/null @@ -1,39 +0,0 @@ -import os - -import matplotlib.pyplot as plt -import neo -import numpy as np -import quantities as pq -from viziphant.rasterplot import rasterplot_rates - -from miv.io import Data, DataManager -from miv.signal.filter import ButterBandpass -from miv.signal.spike import ThresholdCutoff - - -def main(): - """Example spike detection script""" - - # Load dataset from OpenEphys recording - folder_path: str = "~/Open Ephys/2022-03-10-16-19-09" # Data Path - data_manager = DataManager(folder_path) - - # Get signal and rate(hz) - # signal : np.array, shape(N, N_channels) - # timestamps : np.array - # sampling_rate : float - with data_manager[0].load() as (signal, timestamps, sampling_rate): - # Butter bandpass filter - bandpass_filter = ButterBandpass(300, 3000, order=5) - signal = bandpass_filter(signal, sampling_rate) - - # Spike Detection - detector = ThresholdCutoff(cutoff=4.5) - spiketrains = detector(signal, timestamps, sampling_rate) - - # Plot - rasterplot_rates(spiketrains) - - -if __name__ == "__main__": - main() diff --git a/miv/core/__init__.py b/miv/core/__init__.py new file mode 100644 index 00000000..081fe35a --- /dev/null +++ b/miv/core/__init__.py @@ -0,0 +1 @@ +from miv.core.spiketrain import * diff --git a/miv/core/spiketrain.py b/miv/core/spiketrain.py new file mode 100644 index 00000000..f6c2b187 --- /dev/null +++ b/miv/core/spiketrain.py @@ -0,0 +1,13 @@ +__all__ = ["SpikeTrain"] + +import neo + + +class SpikeTrain(neo.core.SpikeTrain): + """Array of spike times + + Represents spikes emitted by the same unit in a period of times. + """ + + # this is currently identical to neo.core.SpikeTrain + # but may deviate in future diff --git a/miv/datasets/optogenetic.py b/miv/datasets/optogenetic.py index 321a2bbf..154e13bc 100644 --- a/miv/datasets/optogenetic.py +++ b/miv/datasets/optogenetic.py @@ -59,10 +59,6 @@ def load_data(): # pragma: no cover 4: └── Record Node 104/experiment4/recording3 - License - ------- - ... - """ subdir = "optogenetic" diff --git a/miv/datasets/ttl_events.py b/miv/datasets/ttl_events.py new file mode 100644 index 00000000..0ba5f06f --- /dev/null +++ b/miv/datasets/ttl_events.py @@ -0,0 +1,74 @@ +__doc__ = """Sample TTL-input signal readout.""" +__all__ = ["load_data"] + +import gzip +import os + +import numpy as np + +from miv.datasets.utils import get_file +from miv.io import DataManager + + +def load_data(): # pragma: no cover + """ + Loads the sample TTL data readout. `Direct Download `_ + + Total size: 17.4 kB (compressed) + + File hash: a4314442fd9eba4377934c5766971ba1e04f079b7e615b8fb033992323afeb3f + + Examples + -------- + >>> from miv.datasets.ttl_events import load_data + >>> experiments: miv.io.DataManager = load_data() + datasets/ttl_recording/sample_event_recording + 0: + └── Record Node 101/experiment1/recording1 + + + Notes + ----- + All experiment are 1 minute long, 30k Hz recording of optogenetic + neuron cells over 64 channels MEA. Dataset includes 1 spontaneous + recording and 4 stimulated recording. + + Spontaneous recording is the recording over 1 minute period without + external stimulation. The purpose was to measure the baseline mean- + firing rate. + + Stimulation was done by LED light. Over 1 minute (60 seconds) period, + 6 stimulation was done with 10 seconds of intervals. 
For each stimulation, + LED light was shined over 1 seconds, followed by remaining 9 seconds + of rest (without light). + + Containing experiments: + + * experiment0: TTL recording + + Returns + ------- + dataset: miv.io.DataManager + + Examples + -------- + >>> from miv import datasets + >>> experiments: miv.io.DataManager = datasets.ttl_events.load_data() + + """ + + subdir = "ttl_recording" + base_url = "https://uofi.box.com/shared/static/w3cylplece450up6t6h53vuq93t98q2k.zip" + file = "sample_event_recording.zip" + file_hash = "a4314442fd9eba4377934c5766971ba1e04f079b7e615b8fb033992323afeb3f" + + path = get_file( + file_url=base_url, + directory=subdir, + fname=file, + file_hash=file_hash, + archive_format="zip", + ) + experiment = DataManager(path) + experiment.tree() + return experiment diff --git a/miv/io/binary.py b/miv/io/binary.py index f08c8728..151cd476 100644 --- a/miv/io/binary.py +++ b/miv/io/binary.py @@ -6,7 +6,13 @@ ############### """ -__all__ = ["load_continuous_data", "load_recording", "oebin_read", "apply_channel_mask"] +__all__ = [ + "load_continuous_data", + "load_recording", + "oebin_read", + "apply_channel_mask", + "load_ttl_event", +] from typing import Any, Dict, List, Optional, Sequence, Set, Union @@ -73,8 +79,6 @@ def bits_to_voltage(signal: SignalType, channel_info: Sequence[Dict[str, Any]]): recorded_unit = pq.Quantity([1], channel_info[channel]["units"]) unit_conversion = (recorded_unit / resultant_unit).simplified signal[:, channel] *= bit_to_volt_conversion * unit_conversion - if "ADC" in channel_info[channel]["channel_name"]: - signal[:, channel] *= 10**6 return signal @@ -97,16 +101,117 @@ def oebin_read(file_path: str): return info -def load_recording( +def load_ttl_event( folder: str, - channel_mask: Optional[Set[int]] = None, + return_sample_numbers: bool = False, +): + """ + Loads TTL event data recorded by Open Ephys as numpy arrays. + + `Reference: OpenEphys TTL data structure `_ + + The path should contain: + + - states.npy: N 16-bit integers, indicating ON/OFF (channel number) + - sample_numbers.npy: N 64-bit integers, sample number during acquisition. + - timestamps.npy: N 64-bit floats, global timestamps + - full_words.npy: N 64-bit integer, TTL word of current state of all lines. + + Extra data are retrieved from: + + - structure.oebin: number of channels and sampling rate. + + Parameters + ---------- + folder: str + Folder containing the subfolder 'experiment1'. + return_sample_numbers: bool + If set to true, also return sample_numbers that can be used to re-calculate + synchronization between time series. (default=False) + + Returns + ------- + states : np.ndarray + Numpy integer array, indicating ON/OFF state. (+- channel number) + full_words : np.ndarray + Numpy integer array, consisting current state of all lines. + timestamps : TimestampsType + Numpy float array. Global timestamps in seconds. Relative to start + of the Record Node's main data stream. + sampling_rate: float + Recorded sampling rate + initial_state: int + Initial TTL state across lines. + sample_numbers: Optional[np.ndarray] + Return if `return_sample_numbers` is true. Return array of sample numbers that + records sampled clock count. Typically used to synchronize time array. + + Raises + ------ + AssertionError + No events recorded in data. 
+ + """ + + # Check TTL event recorded + info_file: str = os.path.join(folder, "structure.oebin") + info: Dict[str, Any] = oebin_read(info_file) + version = info["GUI version"] + assert "events" in info.keys(), "No events recorded (TTL)." + ttl_info = [data for data in info["events"] if "TTL Input" in data["channel_name"]] + assert len(ttl_info) > 0, "No events recorded (TTL)." + assert ( + len(ttl_info) == 1 + ), "Multiple TTL input is found, which is not supported yet. (TODO)" + ttl_info = ttl_info[0] + + # Data Structure (OpenEphys Structure) + v_major, v_minor, v_sub = map(int, version.split(".")) + if v_major == 0 and v_minor <= 5: # Legacy file name before 0.6.0 + file_states = "states.npy" + file_timestamps = "synchronized_timestamps.npy" + file_sample_numbers = "timestamps.npy" + file_full_words = "full_words.npy" + else: + file_states = "states.npy" + file_timestamps = "timestamps.npy" + file_sample_numbers = "sample_numbers.npy" + file_full_words = "full_words.npy" + file_path = os.path.join(folder, "events", ttl_info["folder_name"]) + + states = np.load(os.path.join(file_path, file_states)).astype(np.int16) + sample_numbers = np.load(os.path.join(file_path, file_sample_numbers)).astype( + np.int64 + ) + timestamps = np.load(os.path.join(file_path, file_timestamps)).astype(np.float64) + full_words = np.load(os.path.join(file_path, file_full_words)).astype(np.int64) + + # Load from structure.oebin file + sampling_rate: float = ttl_info["sample_rate"] + initial_state: int = ttl_info["initial_state"] + + if return_sample_numbers: + return ( + states, + full_words, + timestamps, + sampling_rate, + initial_state, + sample_numbers, + ) + else: + return states, full_words, timestamps, sampling_rate, initial_state + + +def load_recording( + folder: str, channel_mask: Optional[Set[int]] = None, start_at_zero: bool = True ): """ Loads data recorded by Open Ephys in Binary format as numpy memmap. The path should contain - continuous//continuous.dat: signal (cannot have multiple file) - - continuous//timestamps.dat: timestamps + - continuous//timestamps.npy: timestamps - structure.oebin: number of channels and sampling rate. Parameters @@ -115,6 +220,9 @@ def load_recording( folder containing at least the subfolder 'experiment1'. channel_mask: Set[int], optional Channel index list to ignore in import (default=None) + start_at_zero : bool + If True, the timestamps is adjusted to start at zero. + Note, recorded timestamps might not start at zero for some reason. Returns ------- @@ -129,7 +237,9 @@ def load_recording( """ - file_path: List[str] = glob(os.path.join(folder, "**", "*.dat"), recursive=True) + file_path: List[str] = glob( + os.path.join(folder, "**", "continuous.dat"), recursive=True + ) assert ( len(file_path) == 1 ), f"There should be only one 'continuous.dat' file. 
(There exists {file_path})" @@ -142,7 +252,9 @@ def load_recording( # channel_info: Dict[str, Any] = info["continuous"][0]["channels"] # TODO: maybe need to support multiple continuous.dat files in the future - signal, timestamps = load_continuous_data(file_path[0], num_channels, sampling_rate) + signal, timestamps = load_continuous_data( + file_path[0], num_channels, sampling_rate, dtype=np.float32 + ) # To Voltage signal = bits_to_voltage(signal, info["continuous"][0]["channels"]) @@ -153,6 +265,10 @@ def load_recording( if channel_mask: signal = apply_channel_mask(signal, channel_mask) + # Adjust timestamps to start from zero + if start_at_zero and not np.isclose(timestamps[0], 0.0): + timestamps -= timestamps[0] + return signal, timestamps, sampling_rate @@ -161,7 +277,8 @@ def load_continuous_data( num_channels: int, sampling_rate: float, timestamps_path: Optional[str] = None, - start_at_zero: bool = True, + dtype: Optional[np.dtype] = None, + _recorded_dtype: Union[np.dtype, str] = "int16", ): """ Load single continous data file and return timestamps and raw data in numpy array. @@ -185,9 +302,11 @@ def load_continuous_data( If None, first check if the file "timestamps.npy" exists on the same directory. If the file doesn't exist, we deduce the timestamps based on the sampling rate and the length of the data. - start_at_zero : bool - If True, the timestamps is adjusted to start at zero. - Note, recorded timestamps might not start at zero for some reason. + dtype: Optional[np.dtype] + If None, skip data-type conversion. If the filesize is too large, it is advisable + to keep `dtype=None` and convert slice by slice. (default=None) + _recorded_dtype: Union[np.dtype, str] + Recorded data type. (default="int16") Returns ------- @@ -205,9 +324,13 @@ def load_continuous_data( """ # Read raw data signal - raw_data: np.ndarray = np.memmap(data_path, dtype="int16", mode="c") + raw_data: np.ndarray = np.memmap( + data_path, dtype=_recorded_dtype, mode="r", order="C" + ) length = raw_data.size // num_channels - raw_data = np.reshape(raw_data, (length, num_channels)).astype("float32") + raw_data = raw_data.reshape(length, num_channels) + if dtype is not None: + raw_data = raw_data.astype(dtype) # Get timestamps_path if timestamps_path is None: @@ -216,13 +339,9 @@ def load_continuous_data( # Get timestamps if os.path.exists(timestamps_path): - timestamps = np.array(np.load(timestamps_path), dtype=np.float64) + timestamps = np.asarray(np.load(timestamps_path), dtype=np.float32) timestamps /= float(sampling_rate) else: # If timestamps_path doesn't exist, deduce the stamps timestamps = np.array(range(0, length)) / sampling_rate - # Adjust timestamps to start from zero - if start_at_zero and not np.isclose(timestamps[0], 0.0): - timestamps -= timestamps[0] - - return np.array(raw_data), timestamps + return raw_data, timestamps diff --git a/miv/io/data.py b/miv/io/data.py index 730142f7..35959f35 100644 --- a/miv/io/data.py +++ b/miv/io/data.py @@ -30,6 +30,7 @@ import logging import os +import pickle from collections.abc import MutableSequence from contextlib import contextmanager from glob import glob @@ -37,7 +38,7 @@ import matplotlib.pyplot as plt import numpy as np -from miv.io.binary import load_continuous_data, load_recording +from miv.io.binary import load_continuous_data, load_recording, load_ttl_event from miv.signal.filter.protocol import FilterProtocol from miv.signal.spike.protocol import SpikeDetectionProtocol from miv.statistics import firing_rates @@ -91,10 +92,21 @@ def __init__( 
data_path: str, ): self.data_path: str = data_path - self.analysis_path: str = os.path.join(data_path, "analysis") + self._analysis_path: str = os.path.join(data_path, "analysis") self.masking_channel_set: Set[int] = set() - os.makedirs(self.analysis_path, exist_ok=True) + os.makedirs(self._analysis_path, exist_ok=True) + + @property + def analysis_path(self): + """Default sub-directory path to save analysis results""" + return self._analysis_path + + @analysis_path.setter + def analysis_path(self, path): + if not os.path.isdir(path): + os.makedirs(path) + self._analysis_path = path def save_figure( self, @@ -110,6 +122,8 @@ figure : plt.Figure group : str filename : str + savefig_kwargs : Optional[Dict[Any, Any]] + Additional parameters to pass to `plt.savefig`. """ if savefig_kwargs is None: savefig_kwargs = {} @@ -121,11 +135,37 @@ plt.figure(figure) plt.savefig(filepath, **savefig_kwargs) + def save_data( + self, + data, + filename: str, + pkl_kwargs: Optional[Dict[Any, Any]] = None, + ): + """Save analysis data into sub-directory + + Parameters + ---------- + data : + filename : str + pkl_kwargs : Optional[Dict[Any, Any]] + Additional parameters to pass to `pickle.dump`. + """ + pkl_kwargs = pkl_kwargs or {} + filepath = os.path.join(self.analysis_path, filename + ".pkl") + with open(filepath, "wb") as output_file: + pickle.dump(data, output_file, **pkl_kwargs) + @contextmanager - def load(self): + def load(self, start_at_zero: bool = False): """ Context manager for loading data instantly. + Parameters + ---------- + start_at_zero : bool + If set to True, the first timestamp will be shifted to zero. To achieve synchronized + timestamps with other recordings/events, set this to False. + Examples -------- >>> data = Data(data_path) @@ -151,13 +191,19 @@ raise FileNotFoundError("Data directory does not have all necessary files.") try: signal, timestamps, sampling_rate = load_recording( - self.data_path, self.masking_channel_set + self.data_path, self.masking_channel_set, start_at_zero=start_at_zero ) yield signal, timestamps, sampling_rate finally: del timestamps del signal + def load_ttl_event(self): + """ + Load TTL event data if the recording contains it. Detailed implementation is :func:`here `. + """ + return load_ttl_event(self.data_path) + def set_channel_mask(self, channel_id: Iterable[int]): """ Set the channel masking. diff --git a/miv/io/file/__init__.py b/miv/io/file/__init__.py new file mode 100644 index 00000000..fe041e65 --- /dev/null +++ b/miv/io/file/__init__.py @@ -0,0 +1,17 @@ +__doc__ = """ +HDF5-based file format for heterogeneous numerical data. +Based on code from and inspired by + +- HEPfile: https://github.com/mattbellis/hepfile +- NeuroH5: https://github.com/iraikov/neuroh5 + +.. automodule:: miv.io.file.read + :members: + +.. 
automodule:: miv.io.file.write + :members: + +""" + +from miv.io.file.read import * +from miv.io.file.write import * diff --git a/miv/io/file/read.py b/miv/io/file/read.py new file mode 100644 index 00000000..eb89a317 --- /dev/null +++ b/miv/io/file/read.py @@ -0,0 +1,439 @@ +from typing import Any, Dict, List, Optional, Tuple, Union + +from logging import Logger + +import h5py +import numpy as np +from h5py._hl.dataset import Dataset +from numpy import int64, ndarray + + +def read( + filename: str, + groups: Optional[Union[str, List[str]]] = None, + subset: Optional[Union[int, List[int], Tuple[int, int]]] = None, + logger: Optional[Logger] = None, +) -> Tuple[Dict[str, Any], Dict[str, None]]: + """ + Reads all, or a subset of the data, from the HDF5 file to fill a data dictionary. + Returns an empty dictionary to be filled later with data from individual containers. + + Args: + **filename** (string): Name of the input file + + **groups** (list): Dataset groups to be read from input file. + + **subset** (int): Number of containers to be read from input file + + **logger** (logging.Logger): optional logger object + + Returns: + **data** (dict): Selected data from HDF5 file + + **container** (dict): An empty container dictionary to be filled by data from select containers + + """ + + # Open the HDF5 file + infile = None + infile = h5py.File(filename, "r") + + # Create the initial data and container dictionary to hold the data + data: Dict[str, Any] = {} + container: Dict[str, Any] = {} + + data["_MAP_DATASETS_TO_COUNTERS_"] = {} + data["_MAP_DATASETS_TO_INDEX_"] = {} + data["_LIST_OF_COUNTERS_"] = [] + data["_LIST_OF_DATASETS_"] = [] + data["_GROUPS_"] = [] + data["_MAP_DATASETS_TO_GROUPS_"] = {} + + # Get the number of containers + data["_NUMBER_OF_CONTAINERS_"] = infile.attrs["_NUMBER_OF_CONTAINERS_"] + + ncontainers = data["_NUMBER_OF_CONTAINERS_"] + + # Determine if only a subset of the data should be read + subset_: Union[None, List[int]] = None + if subset is not None: + + try: + subset_ = validate_subset(subset, ncontainers) + except RuntimeError: + infile.close() + return data, container + + data["_NUMBER_OF_CONTAINERS_"] = subset_[1] - subset_[0] + ncontainers = data["_NUMBER_OF_CONTAINERS_"] + + if logger is not None: + logger.info( + "Will read in a subset of the file!" + f"From container {subset_[0]} (inclusive) through container {subset_[1]-1} (inclusive)" + f"Container {subset_[1]} is not read in" + f"Reading in {ncontainers} containers\n" + ) + + # Get the datasets and counters + dc = infile["_MAP_DATASETS_TO_COUNTERS_"] + for vals in dc: + + # The decode is there because vals were stored as numpy.bytes + counter = vals[1].decode() + index_name = f"{counter}_INDEX" + data["_MAP_DATASETS_TO_COUNTERS_"][vals[0].decode()] = counter + data["_MAP_DATASETS_TO_INDEX_"][vals[0].decode()] = index_name + data["_LIST_OF_COUNTERS_"].append(vals[1].decode()) + data["_LIST_OF_DATASETS_"].append(vals[0].decode()) + data["_LIST_OF_DATASETS_"].append(vals[1].decode()) # Get the counters as well + + # We may have added some counters and datasets multiple times. 
+ # So just to be sure, only keep the unique values + data["_LIST_OF_COUNTERS_"] = np.unique(data["_LIST_OF_COUNTERS_"]).tolist() + data["_LIST_OF_DATASETS_"] = np.unique(data["_LIST_OF_DATASETS_"]).tolist() + + # Get the list of datasets and groups + all_datasets = data["_LIST_OF_DATASETS_"] + + all_datasets = select_datasets(all_datasets, groups, logger=logger) + + # Pull out the counters and build the indices + # We will need to keep track of the indices in the entire file + # This way, if the user specifies a subset of the data, we have the full + # indices already calculated + full_file_indices = {} + + for counter_name in data["_LIST_OF_COUNTERS_"]: + + full_file_counters = infile[counter_name] + full_file_index = calculate_index_from_counters(full_file_counters) + + # If we passed in subset, grab that slice of the data from the file + if subset_ is not None: + # Add 1 to the high range of subset when we pull out the counters + # and index because in order to get all of the entries for the last entry. + data[counter_name] = infile[counter_name][subset_[0] : subset_[1] + 1] + index: np.ndarray = full_file_index[subset_[0] : subset_[1] + 1] + else: + data[counter_name] = infile[counter_name][:] + index = full_file_index + + subset_index = index + # If the file is *not* empty.... + # Just to make sure the "local" index of the data dictionary starts at 0 + if len(index) > 0: + subset_index = index - index[0] + + index_name = f"{counter_name}_INDEX" + + data[index_name] = subset_index + full_file_indices[index_name] = index + + # Loop over the all_datasets we want and pull out the data. + for name in all_datasets: + + # If this is a counter, we're going to have to grab the indices + # differently than for a "normal" dataset + IS_COUNTER = True + index_name_: Union[None, str] = None + if name not in data["_LIST_OF_COUNTERS_"]: + index_name_ = data["_MAP_DATASETS_TO_INDEX_"][name] + IS_COUNTER = False # We will use different indices for the counters + + dataset = infile[name] + + # This will ignore the groups + if isinstance(dataset, h5py.Dataset): + dataset_name = name + group_name = dataset.attrs.get("_GROUP_", None) + + if subset_ is not None: + if IS_COUNTER: + # If this is a counter, then the subset indices + # map on to the same locations for any counters + lo = subset_[0] + hi = subset_[1] + else: + if index_name_ is not None: + lo = full_file_indices[index_name_][0] + hi = full_file_indices[index_name_][-1] + else: + raise RuntimeError("Unknown index") + data[dataset_name] = dataset[lo:hi] + else: + data[dataset_name] = dataset[:] + if group_name not in data["_GROUPS_"]: + data["_GROUPS_"].append(group_name) + if dataset_name not in data["_MAP_DATASETS_TO_GROUPS_"]: + data["_MAP_DATASETS_TO_GROUPS_"][dataset_name] = group_name + + container[ + dataset_name + ] = None # This will be filled for individual container + + infile.close() + return data, container + + +def select_datasets( + datasets: List[str], + groups: Optional[Union[str, List[str]]] = None, + logger: Optional[Logger] = None, +) -> List[str]: + + # Only keep select data from file, if we have specified datasets + if groups is not None: + + if isinstance(groups, str): + groups = list(groups) + + # Count backwards because we'll be removing stuff as we go. 
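# --- Standalone sketch of the substring-based filtering performed below
# --- (not part of the patch; the dataset and group names are hypothetical).
datasets = ["spikes/timestamps", "spikes/N_spikes", "lfp/values"]
groups = ["spikes"]

# Walk the list backwards so that removing an entry does not shift the
# positions of entries that have not been examined yet.
i = len(datasets) - 1
while i >= 0:
    if not any(group in datasets[i] for group in groups):
        datasets.remove(datasets[i])  # drop datasets that match none of the requested groups
    i -= 1

# datasets is now ["spikes/timestamps", "spikes/N_spikes"]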
+ i = len(datasets) - 1 + while i >= 0: + entry = datasets[i] + + is_dropped = True + # This is looking to see if the string is anywhere in the name + # of the dataset + for group in groups: + if group in entry: + is_dropped = False + break + + if is_dropped: + if logger is not None: + logger.info(f"Not reading out {entry} from the file....") + datasets.remove(entry) + + i -= 1 + + if logger is not None: + logger.debug( + f"After only selecting certain datasets ----- " f"datasets: {datasets}" + ) + + return datasets + + +def validate_subset( + subset: Union[int, List[int], Tuple[int, int]], + ncontainers: int, + logger: Optional[Logger] = None, +) -> List[int]: + + if isinstance(subset, tuple): + subset_ = list(subset) + + elif isinstance(subset, list): + subset_ = subset + + elif isinstance(subset, int): + if logger is not None: + logger.warning( + "Single subset value of {subset} being interpreted as a high range" + f"subset being set to a range of (0,{subset})\n" + ) + subset_ = [0, subset] + else: + raise RuntimeError(f"Unsupported type of subset argument: {subset}") + + # If the user has specified `subset` incorrectly, then let's return + # an empty data and container + if subset_[1] - subset_[0] <= 0: + if logger is not None: + logger.warning( + "The range in subset is either 0 or negative!" + f"{subset_[1]} - {subset_[0]} = {subset_[1] - subset_[0]}" + "Returning an empty data and container dictionary!\n" + ) + raise RuntimeError("Range in subset is 0 or negative") + + if subset_[0] > ncontainers: + if logger is not None: + logger.error( + "Range for subset starts greater than number of containers in file!" + f"{subset_[0]} > {ncontainers}" + ) + raise RuntimeError( + "Range for subset starts greater than number of containers in file!" + ) + + if subset_[1] > ncontainers: + if logger is not None: + logger.warning( + "Range for subset is greater than number of containers in file!" + f"{subset_[1]} > {ncontainers}" + f"High range of subset will be set to {ncontainers}\n" + ) + subset_[1] = ncontainers + + if subset_[1] <= subset_[0]: + if logger is not None: + logger.error( + "Will not be reading anything in!" + f"High range of {subset_[1]} is less than or equal to low range of {subset_[0]}" + ) + raise RuntimeError( + f"High range of {subset_[1]} is less than or equal to low range of {subset_[0]}" + ) + + return subset_ + + +def calculate_index_from_counters(counters: Dataset) -> ndarray: + index = np.add.accumulate(counters) - counters + return index + + +def unpack( + container: Dict[str, Any], + data: Dict[str, Any], + n: int = 0, +) -> None: + + """Fills the container dictionary with selected rows from the data dictionary. + + Args: + + **container** (dict): container dictionary to be filled + + **data** (dict): Data dictionary used to fill the container dictionary + + **n** (integer): 0 by default. Which entry should be pulled out of the data + dictionary and inserted into the container dictionary. 
+ + """ + + keys = container.keys() + + for key in keys: + + # if "num" in key: + if key in data["_LIST_OF_COUNTERS_"]: + container[key] = data[key][n] + + elif "INDEX" not in key: + indexkey = data["_MAP_DATASETS_TO_INDEX_"][key] + numkey = data["_MAP_DATASETS_TO_COUNTERS_"][key] + + if len(data[indexkey]) > 0: + index = data[indexkey][n] + + if len(data[numkey]) > 0: + nobjs = data[numkey][n] + container[key] = data[key][index : index + nobjs] + + +def get_ncontainers_in_file( + filename: str, logger: Optional[Logger] = None +) -> Union[None, int64]: + + """Get the number of containers in the file.""" + + with h5py.File(filename, "r+") as f: + a = f.attrs + + if a.__contains__("_NUMBER_OF_CONTAINERS_"): + _NUMBER_OF_CONTAINERS_ = a.get("_NUMBER_OF_CONTAINERS_") + f.close() + return _NUMBER_OF_CONTAINERS_ + else: + if logger is not None: + logger.warning( + '\nFile does not contain the attribute, "_NUMBER_OF_CONTAINERS_"\n' + ) + f.close() + return None + + +def get_ncontainers_in_data(data, logger=None) -> Union[None, int64]: + + """Get the number of containers in the data dictionary. + + This is useful in case you've only pulled out subsets of the data + + """ + + if not isinstance(data, dict): + if logger is not None: + logger.warning(f"{data} is not a dictionary!\n") + return None + + if "_NUMBER_OF_CONTAINERS_" in list(data.keys()): + _NUMBER_OF_CONTAINERS_ = data["_NUMBER_OF_CONTAINERS_"] + return _NUMBER_OF_CONTAINERS_ + else: + if logger is not None: + logger.warning( + '\ndata dictionary does not contain the key, "_NUMBER_OF_CONTAINERS_"\n' + ) + return None + + +def get_file_metadata(filename: str) -> Union[None, Dict[str, Any]]: + + """Get the file metadata and return it as a dictionary""" + + f = h5py.File(filename, "r+") + + a = f.attrs + + if len(a) < 1: + f.close() + return None + + metadata = {} + + for key in a.keys(): + metadata[key] = a[key] + + f.close() + + return metadata + + +def print_file_metadata(filename: str): + + """Pretty print the file metadata""" + + metadata = get_file_metadata(filename) + + if metadata is None: + return None + + output = "" + + keys = list(metadata.keys()) + + first_keys_to_print = ["date", "_NUMBER_OF_CONTAINERS_"] + + keys_already_printed = [] + + # Print the basics first + for key in first_keys_to_print: + if key in keys: + val = metadata[key] + output += f"{key:<20s} : {val}\n" + keys_already_printed.append(key) + + # Print the versions next + for key in keys: + if key in keys_already_printed: + continue + + if key.find("version") >= 0: + val = metadata[key] + output += f"{key:<20s} : {val}\n" + keys_already_printed.append(key) + + # Print the read of the metadata + for key in keys: + if key in keys_already_printed: + continue + + val = metadata[key] + output += f"{key:<20s} : {val}\n" + keys_already_printed.append(key) + + return output diff --git a/miv/io/file/write.py b/miv/io/file/write.py new file mode 100644 index 00000000..31c7bab1 --- /dev/null +++ b/miv/io/file/write.py @@ -0,0 +1,580 @@ +from typing import Any, Dict, List, Optional, Sequence, Type, Union + +import datetime +import sys +from logging import Logger + +import h5py +import h5py_cache +import numpy as np +from h5py._hl.files import File +from numpy import bytes_ + + +def initialize() -> Dict[str, Any]: + """Creates an empty data dictionary + + Returns: + + **data** (dict): An empty data dictionary + + """ + + data: Dict[str, Any] = {} + + data["_GROUPS_"] = {} + data["_MAP_DATASETS_TO_COUNTERS_"] = {} + data["_LIST_OF_COUNTERS_"] = [] + 
data["_MAP_DATASETS_TO_DATA_TYPES_"] = {} + data["_GROUP_METADATA_"] = {} + + data["_PROTECTED_NAMES_"] = [ + "_PROTECTED_NAMES_", + "_GROUPS_", + "_MAP_DATASETS_TO_COUNTERS_", + "_MAP_DATASETS_TO_DATA_TYPES_", + "_LIST_OF_COUNTERS_", + "_GROUP_METADATA_", + ] + + return data + + +def clear_container(container: Dict[str, Any]) -> None: + """Clears the data from the container dictionary. + + Args: + **container** (dict): The container to be cleared. + + """ + + for key in container.keys(): + + if key == "_LIST_OF_COUNTERS_": + continue + + if isinstance(container[key], list): + container[key].clear() + elif isinstance(container[key], np.ndarray): + container[key] = [] + elif isinstance(container[key], int): + if key in container["_LIST_OF_COUNTERS_"]: + container[key] = 0 + else: + container[key] = -999 + elif isinstance(container[key], float): + container[key] = np.nan + elif isinstance(container[key], str): + container[key] = "" + + +def create_container( + data: Dict[str, Any], +) -> Dict[str, Any]: + """Creates a container dictionary that will be used to collect data and then + packed into the the master data dictionary. + + Args: + **data** (dict): Data dictionary that will hold all the data from the container. + + Returns: + **container** (dict): The new container dictionary with keys and no container information + + """ + + container: Dict[str, Any] = {} + + for k in data.keys(): + if k in data["_LIST_OF_COUNTERS_"]: + container[k] = 0 + else: + container[k] = data[k].copy() + + return container + + +def create_group( + data: Dict[str, Any], + group_name: str, + metadata: Dict[str, Union[str, int, float]] = {}, + counter: Optional[str] = None, + logger: Optional[Logger] = None, +) -> str: + """Adds a group in the dictionary + + Args: + **data** (dict): Dictionary to which the group will be added + + **group_name** (string): Name of the group to be added + + **counter** (string): Name of the counter key. None by default + + """ + + group_id = group_name.replace("/", "-") + if logger is not None: + logger.warning( + "----------------------------------------------------" + f"Slashes / are not allowed in group names" + f"Replacing / with - in group name {group_name}" + f"The new name will be {group_id}" + "----------------------------------------------------" + ) + # Change name of variable, just to keep code more understandable + counter_id = counter + + # Create a counter_name if the user has not specified one + if counter_id is None: + counter_id = f"N_{group_id}" + if logger is not None: + logger.warning( + "----------------------------------------------------" + f"There is no counter to go with group {group_name}" + f"Creating a counter called {counter_id}" + "-----------------------------------------------------" + ) + + # Check for slashes in the counter name. We can't have them. + if counter_id.find("/") >= 0: + counter_id = counter_id.replace("/", "-") + + keys = data.keys() + + # Then put the group and any datasets in there next. 
+ keyfound = False + for k in keys: + if group_id == k: + if logger is not None: + logger.warning(f"{group_name} is already in the dictionary!") + keyfound = True + break + + if not keyfound: + + data["_GROUPS_"][group_id] = [] + + data["_GROUPS_"][group_id].append(counter_id) + counter_path = f"{group_id}/{counter_id}" + + data["_GROUP_METADATA_"][group_id] = metadata + data["_MAP_DATASETS_TO_COUNTERS_"][group_id] = counter_path + data["_MAP_DATASETS_TO_DATA_TYPES_"][counter_path] = int + + if counter_path not in data["_LIST_OF_COUNTERS_"]: + data["_LIST_OF_COUNTERS_"].append(counter_path) + + data[counter_path] = [] + + return group_id + + +def create_dataset( + data: Dict[str, Any], + datasets: Union[str, List[str]], + group: str, + dtype: Union[Type[int], Type[float], Type[str]] = float, + logger: Optional[Logger] = None, +) -> int: + """Adds a dataset to a group in a dictionary. If the group does not exist, it will be created. + + Args: + **data** (dict): Dictionary that contains the group + + **datasets** (list): Datasets to be added to the group + + **group** (string): Name of group the dataset will be added to. + + **dtype** (type): The data type. float by default. + + Returns: + **-1**: If the group is None + + + """ + + if isinstance(datasets, str): + datasets_: List[str] = [datasets] + else: + datasets_ = datasets + + # Check for slashes in the group name. We can't have them. + for i in range(len(datasets_)): + dataset_name = datasets_[i] + if dataset_name.find("/") >= 0: + new_dataset_name = dataset_name.replace("/", "-") + datasets_[i] = new_dataset_name + if logger is not None: + logger.warning( + "----------------------------------------------------" + f"Slashes / are not allowed in dataset names" + f"Replacing / with - in dataset name {dataset_name}" + f"The new name will be {new_dataset_name}" + "----------------------------------------------------" + ) + + keys = data.keys() + + if group.find("/") >= 0: + group = group.replace("/", "-") + + # Put the counter in the dictionary first. + keyfound = False + for k in data["_GROUPS_"]: + if group == k: + keyfound = True + + if not keyfound: + counter = f"N_{group}" + create_group(data, group, counter=counter) + if logger is not None: + logger.warning( + f"Group {group} is not in the dictionary yet!" + f"Adding it, along with a counter of {counter}" + ) + + # Then put the datasets into the group in there next. + for dataset in datasets_: + keyfound = False + name = f"{group}/{dataset}" + for k in keys: + if name == k: + if logger is not None: + logger.warning(f"{name} is already in the dictionary!") + keyfound = True + if not keyfound: + if logger is not None: + logger.info( + f"Adding dataset {dataset} to the dictionary under group {group}." + ) + data[name] = [] + data["_GROUPS_"][group].append(dataset) + + # Add a counter for this dataset for the group with which it is associated. + counter = data["_MAP_DATASETS_TO_COUNTERS_"][group] + # counter_name = "%s/%s" % (group,counter) + data["_MAP_DATASETS_TO_COUNTERS_"][name] = counter + + data["_MAP_DATASETS_TO_DATA_TYPES_"][name] = dtype + + return 0 + + +def pack( + data: Dict[str, Any], + container: Dict[str, Any], + AUTO_SET_COUNTER: bool = True, + EMPTY_OUT_CONTAINER: bool = True, + STRICT_CHECKING: bool = False, + logger: Optional[Logger] = None, +) -> int: + """Takes the data from an container and packs it into the data dictionary, + intelligently, so that it can be stored and extracted efficiently. 
+ + Args: + **data** (dict): Data dictionary to hold the entire dataset EDIT. + + **container** (dict): container to be packed into data. + + **EMPTY_OUT_CONTAINER** (bool): If this is `True` (default) then empty out the container in preparation + for the next iteration. Useful to disable when debugging and inspecting containers. + + **STRICT_CHECKING** (bool): If `True`, then check that all datasets have the same length, otherwise + + **AUTO_SET_COUNTER** (bool): If `True`, update counter value with length of dataset in container + + """ + + # Calculate the number of entries for each group and set the + # value of that counter. + if AUTO_SET_COUNTER: + for group in data["_GROUPS_"]: + + datasets = data["_GROUPS_"][group] + counter = data["_MAP_DATASETS_TO_COUNTERS_"][group] + + # Here we will calculate the values for the counters, based + # on the size of the datasets + counter_value = None + + # Loop over the datasets + for d in datasets: + full_dataset_name = group + "/" + d + # Skip any counters + if counter == full_dataset_name: + continue + else: + # Grab the size of the first dataset + temp_counter_value = len(container[full_dataset_name]) + + # If we're not STRICT_CHECKING, then use that value for the + # counter and break the loop over the datasets, moving on + # to the next group. + if STRICT_CHECKING is False: + container[counter] = temp_counter_value + break + # Otherwise, we'll check that *all* the datasets have the same + # length. + else: + if counter_value is None: + counter_value = temp_counter_value + container[counter] = temp_counter_value + elif counter_value != temp_counter_value: + # In this case, we found two groups of different length! + # Print this to help the user identify their error + if logger is not None: + logger.warning( + f"Two datasets in group {group} have different sizes!" + ) + for tempd in datasets: + temp_full_dataset_name = group + "/" + tempd + # Don't worry about the dataset + if counter == temp_full_dataset_name: + continue + + # Return a value for the external program to catch. + raise RuntimeError( + f"Two datasets in group {group} have different sizes!" + ) + + # Then pack the container into the data + keys = list(container.keys()) + for key in keys: + + if key in data["_PROTECTED_NAMES_"]: + continue + + if isinstance(container[key], list): + value = container[key] + if len(value) > 0: + data[key] += value + else: + data[key].append(container[key]) + + # Clear out the container after it has been packed + if EMPTY_OUT_CONTAINER: + clear_container(container) + + return 0 + + +def convert_list_and_key_to_string_data(datalist, key): + """Converts data dictionary to a string + + Args: + **datalist** (list): A list to be saved as a string. + + Returns: + **key** (string): We will assume that this will be unpacked as a dictionary, + and this will be the key for the list in that dictionary. 
+ + """ + + a = np.string_(key) + + mydataset = [] + b = np.string_("") + nvals = len(datalist) + for i, val in enumerate(datalist): + b += np.string_(val) + if i < nvals - 1: + b += np.string_("__:__") + mydataset.append([a, b]) + + return mydataset + + +def convert_dict_to_string_data(dictionary: Dict[str, str]) -> List[List[bytes_]]: + """Converts data dictionary to a string + + Args: + **dictionary** (dict): Dictionary to be converted to a string + + Returns: + **mydataset** (string): String representation of the dataset + + """ + + keys = dictionary.keys() + + mydataset = [] + for i, key in enumerate(keys): + a = np.string_(key) + b = np.string_(dictionary[key]) + mydataset.append([a, b]) + + return mydataset + + +def write_metadata( + filename: str, + metadata: Dict[str, str] = {}, + write_default_values: bool = True, + append: bool = True, +) -> File: + + """Writes file metadata in the attributes of an HDF5 file + + Args: + **filename** (string): Name of output file + + **metadata** (dictionary): Metadata desired by user + + **write_default_values** (boolean): True if user wants to write/update the + default metadata: date, hepfile version, + h5py version, numpy version, and Python + version, false if otherwise. + + **append** (boolean): True if user wants to keep older metadata, false otherwise. + + Returns: + **hdoutfile** (HDF5): File with new metadata + + """ + + hdoutfile = h5py.File(filename, "a") + + non_metadata = ["_NUMBER_OF_CONTAINERS_", "_NUMBER_OF_ENTRIES_"] + + if not append: + for key in hdoutfile.attr.keys(): + if key not in non_metadata: + del hdoutfile.attrs[key] + + if write_default_values: + hdoutfile.attrs["date"] = datetime.datetime.now().isoformat(sep=" ") + hdoutfile.attrs["numpy_version"] = np.__version__ + hdoutfile.attrs["h5py_version"] = h5py.__version__ + hdoutfile.attrs["python_version"] = sys.version + + for key in metadata: + hdoutfile.attrs[key] = metadata[key] + + hdoutfile.close() + return hdoutfile + + +def write( + filename: str, + data: Dict[str, Any], + comp_type: Optional[str] = None, + comp_opts: Optional[int] = None, + logger: Optional[Logger] = None, +) -> File: + + """Writes the selected data to an HDF5 file + + Args: + **filename** (string): Name of output file + + **data** (dictionary): Data to be written into output file + + **comp_type** (string): Type of compression + + Returns: + **hdoutfile** (HDF5): File to which the data has been written + + """ + hdoutfile = h5py_cache.File( + filename, "w", libver="latest", chunk_cache_mem_size=1024**3 + ) + + _GROUPS_ = data["_GROUPS_"].keys() + + # Convert this to a 2xN array for writing to the hdf5 file. + # This gives us one small list of information if we need to pull out + # small chunks of data + mydataset = convert_dict_to_string_data(data["_MAP_DATASETS_TO_COUNTERS_"]) + dset = hdoutfile.create_dataset( + "_MAP_DATASETS_TO_COUNTERS_", + data=mydataset, + dtype="S256", + compression=comp_type, + compression_opts=comp_opts, + ) + + # Convert this to a 2xN array for writing to the hdf5 file. + # This has the _GROUPS_ and the datasets in them. 
+ for group in _GROUPS_: + + hdoutfile.create_group(group) + hdoutfile[group].attrs["counter"] = np.string_( + data["_MAP_DATASETS_TO_COUNTERS_"][group] + ) + + metadata = data["_GROUP_METADATA_"][group] + for key in metadata: + val = metadata[key] + if isinstance(val, str): + hval = np.string_(val) + else: + hval = val + hdoutfile[group].attrs[key] = hval + + datasets = data["_GROUPS_"][group] + + for dataset in datasets: + + name = None + name = f"{group}/{dataset}" + + x = data[name] + + dataset_dtype = data["_MAP_DATASETS_TO_DATA_TYPES_"][name] + + if isinstance(x, list): + x = np.asarray(x, dtype=dataset_dtype) + + if logger is not None: + logger.info( + f"Writing dataset {name} to file {name} as type {str(dataset_dtype)}: x.dtype {x.dtype} data.shape = {x.shape}" + ) + + dset = None + if dataset_dtype is not str: + dset = hdoutfile.create_dataset( + name, + data=x, + compression=comp_type, + compression_opts=comp_opts, + dtype=dataset_dtype, + chunks=True, + ) + else: + # For writing strings, ensure strings are ascii and not Unicode + dataset_dtype = h5py.special_dtype(vlen=str) + longest_word = len(max(x, key=len)) + arr = np.array(x, dtype="S" + str(longest_word)) + dset = hdoutfile.create_dataset( + name, + data=arr, + dtype=dataset_dtype, + compression=comp_type, + compression_opts=comp_opts, + ) + dset.attrs["_GROUP_"] = np.string_(group) + + # Get the number of containers + counters = data["_LIST_OF_COUNTERS_"] + _NUMBER_OF_CONTAINERS_ = -1 + prevcounter = None + for i, countername in enumerate(counters): + ncounter = len(data[countername]) + if logger is not None: + logger.debug(f"{countername:<32s} has {ncounter:<12d} entries") + if i > 0 and ncounter != _NUMBER_OF_CONTAINERS_: + if logger is not None: + logger.warning( + f"{countername} and {prevcounter} have differing numbers of entries!" + ) + + if _NUMBER_OF_CONTAINERS_ < ncounter: + _NUMBER_OF_CONTAINERS_ = ncounter + + prevcounter = countername + + hdoutfile.attrs["_NUMBER_OF_CONTAINERS_"] = _NUMBER_OF_CONTAINERS_ + hdoutfile.close() + + write_metadata(filename) + + return hdoutfile diff --git a/miv/io/serial/__init__.py b/miv/io/serial/__init__.py new file mode 100644 index 00000000..8a8c142a --- /dev/null +++ b/miv/io/serial/__init__.py @@ -0,0 +1,2 @@ +from miv.io.serial.arduino import * +from miv.io.serial.stimjim import * diff --git a/miv/io/serial/arduino.py b/miv/io/serial/arduino.py new file mode 100644 index 00000000..7126f722 --- /dev/null +++ b/miv/io/serial/arduino.py @@ -0,0 +1,129 @@ +__doc__ = """ +Basic utilities for serial communication using PySerial package. +The tool is originally developped for experiment using `Stimjim `_ +The purpose is to output spiketrain as pulse signal. + +author: +""" +__all__ = ["ArduinoSerial", "list_serial_ports"] + +import os +import sys +import time + +import serial + + +def list_serial_ports(): + """list serial communication ports available""" + from serial.tools.list_ports import main + + main() + + +class ArduinoSerial: + """ + Stimjim compatible + - Baudrate: 112500 + """ + + def __init__(self, port: str, baudrate: int = 112500): + self._data_started = False + self._data_buf = "" + self._message_complete = False + self.baudrate = baudrate + self.port = port + self.serial_port = None + + def connect(self): + self.serial_port = self._setup_serial(self.baudrate, self.port) + + def _setup_serial( + self, baudrate: int, serial_port_name: str, verbose: bool = False + ): + """Setup serial connection. 
+ + Parameters + ---------- + baudrate : int + serial bits communication rate (bit per sec) + serial_port_name : str + Serial port name. Typically start with "COM". To scan available ports, + run `list_serial_ports`. + verbose : bool + If set to true, print out debugging messages (default=False). + """ + serial_port = serial.Serial( + port=serial_port_name, baudrate=baudrate, timeout=0, rtscts=True + ) + if verbose: + print(f"{serial_port_name=} {baudrate=}") + self.wait() + return serial_port + + @property + def is_open(self): + return self.serial_port.is_open + + def open(self): + self.serial_port.open() + + def close(self): + self.serial_port.close() + + def send( + self, + msg: str, + start_character: str = "", + eol_character: str = "\n", + verbose: bool = False, + ): + # adds the start- and end-markers before sending + full_msg = start_character + msg + eol_character + self.serial_port.write(full_msg.encode("utf-8")) + if verbose: + print(f"Msg send: {full_msg}") + + def receive(self, start_character="", eol_character="\n"): + """receive. + + Parameters + ---------- + start_character : + start_character + eol_character : + eol_character + """ + if self.serial_port.in_waiting() > 0 and not self._message_complete: + x = self.serial_port.read().decode("utf-8") # decode needed for Python3 + + if self._data_started: + if x != eol_character: + self._data_buf = self._data_buf + x + else: + self._data_started = False + self._message_complete = True + elif x == start_character: + self._data_buf = "" + self._data_started = True + + if self._message_complete: + self._message_complete = False + return self._data_buf + else: + return "ready" + + def wait(self, verbose: bool = False): + """ + Allows time for Arduino launch. It also ensures that any bytes left + over from a previous message are discarded + """ + if verbose: + print("Waiting for Arduino to reset") + + msg = "" + prev_msg = "" + while msg.lower().find("ready") == -1: + msg = self.receive() + prev_msg = msg + return prev_msg diff --git a/miv/io/serial/stimjim.py b/miv/io/serial/stimjim.py new file mode 100644 index 00000000..26472fb9 --- /dev/null +++ b/miv/io/serial/stimjim.py @@ -0,0 +1,88 @@ +__doc__ = """ +Stimjim extension. +Basic utilities for translating `spiketrains` into Stimjim pulse generator. + +author: +""" +__all__ = ["StimjimSerial"] + +from typing import List, Optional + +import os +import sys +import time + +import numpy as np +import serial + +from miv.io.serial import ArduinoSerial +from miv.typing import SpiketrainType + + +class StimjimSerial(ArduinoSerial): + """ + Module to control Stimjim using PySerial. + + All time-units are in micro. + All volt-units are in milli. + All ampere-units are in micro. 
+ """ + + def __init__(self, output0_mode=1, output1_mode=3, high_v=4500, low_v=0, **kwargs): + super().__init__(**kwargs) + self.output0_mode = output0_mode + self.output1_mode = output1_mode + self.high_v_1 = high_v + self.low_v_1 = low_v + self.high_v_2 = 0 + self.low_v_2 = 0 + + def send_spiketrain( + self, + pulsetrain: int, + spiketrain: SpiketrainType, + t_max: int, + total_duration: int, + delay: float = 0.0, + channel: int = 0, + ) -> bool: + total_string, total_period = self._spiketrain_to_str(spiketrain, t_max) + total_string.insert( + 0, + self._start_str( + pulsetrain, + self.output0_mode, + self.output1_mode, + total_period, + total_duration, + ), + ) + return "; ".join(total_string) + + def _start_str(self, pulsetrain, output0_mode, output1_mode, period, duration): + return f"S{pulsetrain},{output0_mode},{output1_mode},{period},{duration}" + + def _spiketrain_to_str( + self, spiketrain: SpiketrainType, t_max: int, pulse_length: int = 10_000 + ) -> List[str]: + spiketrain = np.insert(spiketrain, 0, 0) + spiketrain = np.append(spiketrain, t_max) + gaps = np.diff(spiketrain) + if np.any(gaps < pulse_length): + raise ValueError( + f"Gap between pulse must be larger than pulse length. {spiketrain}" + ) + + # String functions + def gap_to_str(x, A1, A2): + return f"{A1},{A2},{x}" + + pulse_to_str = gap_to_str(pulse_length, self.high_v_1, 0) + + total_string = [gap_to_str(gaps[0], self.low_v_1, 0)] # First Gap + for gap in gaps[1:]: + total_string.append(pulse_to_str) + total_string.append(gap_to_str(gap - pulse_length, self.low_v_1, 0)) + + total_period = gaps.sum() + return total_string, total_period diff --git a/miv/statistics/spiketrain_statistics.py b/miv/statistics/spiketrain_statistics.py index 3a79fb9d..fe884a15 100644 --- a/miv/statistics/spiketrain_statistics.py +++ b/miv/statistics/spiketrain_statistics.py @@ -177,9 +177,10 @@ def binned_spiketrain( binned spiketrain with 1 corresponding to spike and zero otherwise """ - + assert t_start < t_end, "End time cannot be smaller or equal to start time" + assert bin_size > 0, "bin size should be greater than 0" n_bins = int((t_end - t_start) / bin_size + 1) - time = np.linspace(t_start, bin_size * (n_bins - 1), n_bins) + time = np.linspace(t_start, t_start + bin_size * (n_bins - 1), n_bins) bin_spike = np.zeros(n_bins) if isinstance(spiketrains[channel], np.ndarray): spike = spiketrains[channel] @@ -222,7 +223,8 @@ def fano_factor( fanofactor for the specified channel and conditions """ - + assert t_start < t_end, "End time cannot be smaller or equal to start time" + assert n_bins > 0, "Number of bins should be a positive integer" bin_spike = binned_spiketrain(spiketrains, channel, t_start, t_end, 0.002) assert np.sum(bin_spike) != 0, "The channel has no spikes" large_bin = [] diff --git a/miv/visualization/connectivity.py b/miv/visualization/connectivity.py index 32b6eaf9..0fd971ca 100644 --- a/miv/visualization/connectivity.py +++ b/miv/visualization/connectivity.py @@ -27,9 +27,8 @@ def plot_connectivity( Returns ------- - figure - matplot figure with bursts plotted for all electrodes - axes + g + graphviz graph object """ connec_x, connec_y = np.shape(connectivity_matrix) @@ -92,7 +91,7 @@ def plot_connectivity( fontsize="15", fontname="Times:Roman bold", ) - return g.view() + return g def plot_connectivity_interactive( @@ -114,9 +113,8 @@ def plot_connectivity_interactive( Returns ------- - figure - matplot figure with bursts plotted for all electrodes - axes + net + pyvis network object """ connec_x, connec_y = 
np.shape(connectivity_matrix) assert connec_x == connec_y, "Connectivity matrix should be a square matrix" @@ -163,4 +161,4 @@ def plot_connectivity_interactive( for n in net.nodes: n.update({"physics": False}) - return net.show("nodes.html") + return net diff --git a/miv/visualization/waveform.py b/miv/visualization/waveform.py index ffe26cd4..f8253469 100644 --- a/miv/visualization/waveform.py +++ b/miv/visualization/waveform.py @@ -24,7 +24,7 @@ def extract_waveforms( signal: SignalType, spikestamps: SpikestampsType, - channel: int, + channel: Optional[int], sampling_rate: float, pre: pq.Quantity = 0.001 * pq.s, post: pq.Quantity = 0.002 * pq.s, @@ -38,8 +38,8 @@ def extract_waveforms( The signal as a 2-dimensional numpy array (length, num_channel) spikestamps : SpikestampsType The sample index of all spikes as a 1-dim numpy array - channel : int - Interested channel + channel : Optional[int] + Interested channel. If None, assume signal and spikestamps are single channel. sampling_rate : float The sampling frequency in Hz pre : pq.Quantity @@ -53,9 +53,9 @@ def extract_waveforms( Return stacks of spike cutout; shape(n_spikes, width). """ - # TODO: Refactor this part - signal = signal[:, channel] - spikestamps = spikestamps[channel] + if channel is not None: + signal = signal[:, channel] + spikestamps = spikestamps[channel] cutouts = [] pre_idx = int(pre * sampling_rate) diff --git a/poetry.lock b/poetry.lock index b5f9879d..ed06983f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -16,16 +16,16 @@ python-versions = "*" [[package]] name = "astroid" -version = "2.11.7" +version = "2.12.5" description = "An abstract syntax tree for Python with inference support." category = "dev" optional = false -python-versions = ">=3.6.2" +python-versions = ">=3.7.2" [package.dependencies] lazy-object-proxy = ">=1.4.0" typing-extensions = {version = ">=3.10", markers = "python_version < \"3.10\""} -wrapt = ">=1.11,<2" +wrapt = {version = ">=1.11,<2", markers = "python_version < \"3.11\""} [[package]] name = "asttokens" @@ -39,15 +39,7 @@ python-versions = "*" six = "*" [package.extras] -test = ["pytest", "astroid (<=2.5.3)"] - -[[package]] -name = "atomicwrites" -version = "1.4.1" -description = "Atomic file writes." 
-category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +test = ["astroid (<=2.5.3)", "pytest"] [[package]] name = "attrs" @@ -58,10 +50,10 @@ optional = false python-versions = ">=3.5" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "cloudpickle"] +dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"] +docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] +tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"] +tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"] [[package]] name = "babel" @@ -99,7 +91,7 @@ lxml = ["lxml"] [[package]] name = "black" -version = "22.6.0" +version = "22.8.0" description = "The uncompromising code formatter." category = "dev" optional = false @@ -124,7 +116,7 @@ name = "certifi" version = "2022.6.15" description = "Python package for providing Mozilla's CA Bundle." category = "main" -optional = true +optional = false python-versions = ">=3.6" [[package]] @@ -151,7 +143,7 @@ name = "charset-normalizer" version = "2.1.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." category = "main" -optional = true +optional = false python-versions = ">=3.6.0" [package.extras] @@ -214,6 +206,22 @@ category = "main" optional = false python-versions = ">=3.5" +[[package]] +name = "deepdish" +version = "0.3.7" +description = "Deep Learning experiments from University of Chicago." 
+category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +numpy = "*" +scipy = "*" +tables = "*" + +[package.extras] +image = ["skimage"] + [[package]] name = "dill" version = "0.3.5.1" @@ -227,7 +235,7 @@ graph = ["objgraph (>=1.7.2)"] [[package]] name = "distlib" -version = "0.3.5" +version = "0.3.6" description = "Distribution utilities" category = "dev" optional = false @@ -259,11 +267,11 @@ tqdm = "*" [package.extras] cuda = ["pycuda (>=2020.1)"] -docs = ["numpydoc (>=1.1.0)", "jupyter (>=1.0.0)", "sphinx (>=3.3.0)", "nbsphinx (>=0.8.0)", "sphinxcontrib-bibtex (==1.0.0)", "sphinx-tabs (>=1.3.0)", "matplotlib (>=3.3.2)"] -extras = ["pandas (>=0.18.0)", "scikit-learn (>=0.23.2)", "statsmodels (>=0.12.1)", "jinja2 (>=2.11.2)"] +docs = ["jupyter (>=1.0.0)", "matplotlib (>=3.3.2)", "nbsphinx (>=0.8.0)", "numpydoc (>=1.1.0)", "sphinx (>=3.3.0)", "sphinx-tabs (>=1.3.0)", "sphinxcontrib-bibtex (==1.0.0)"] +extras = ["jinja2 (>=2.11.2)", "pandas (>=0.18.0)", "scikit-learn (>=0.23.2)", "statsmodels (>=0.12.1)"] opencl = ["pyopencl (>=2020.2.2)"] tests = ["pytest"] -tutorials = ["matplotlib (>=3.3.2)", "h5py (>=3.1.0)", "nixio (>=1.5.0)"] +tutorials = ["h5py (>=3.1.0)", "matplotlib (>=3.3.2)", "nixio (>=1.5.0)"] [[package]] name = "entrypoints" @@ -275,7 +283,7 @@ python-versions = ">=3.6" [[package]] name = "executing" -version = "0.10.0" +version = "1.0.0" description = "Get the currently executing AST node of a frame, and other information" category = "main" optional = false @@ -290,7 +298,7 @@ optional = true python-versions = "*" [package.extras] -devel = ["colorama", "jsonschema", "json-spec", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] [[package]] name = "filelock" @@ -301,34 +309,34 @@ optional = false python-versions = ">=3.7" [package.extras] -testing = ["pytest-timeout (>=2.1)", "pytest-cov (>=3)", "pytest (>=7.1.2)", "coverage (>=6.4.2)", "covdefaults (>=2.2)"] -docs = ["sphinx-autodoc-typehints (>=1.19.1)", "sphinx (>=5.1.1)", "furo (>=2022.6.21)"] +docs = ["furo (>=2022.6.21)", "sphinx (>=5.1.1)", "sphinx-autodoc-typehints (>=1.19.1)"] +testing = ["covdefaults (>=2.2)", "coverage (>=6.4.2)", "pytest (>=7.1.2)", "pytest-cov (>=3)", "pytest-timeout (>=2.1)"] [[package]] name = "flake8" -version = "4.0.1" +version = "5.0.4" description = "the modular source code checker: pep8 pyflakes and co" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.1" [package.dependencies] -mccabe = ">=0.6.0,<0.7.0" -pycodestyle = ">=2.8.0,<2.9.0" -pyflakes = ">=2.4.0,<2.5.0" +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.9.0,<2.10.0" +pyflakes = ">=2.5.0,<2.6.0" [[package]] name = "fonttools" -version = "4.37.0" +version = "4.37.1" description = "Tools to manipulate font files" category = "main" optional = false python-versions = ">=3.7" [package.extras] -all = ["fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "zopfli (>=0.1.4)", "lz4 (>=1.7.4.2)", "matplotlib", "sympy", "skia-pathops (>=0.5.0)", "uharfbuzz (>=0.23.0)", "brotlicffi (>=0.8.0)", "scipy", "brotli (>=1.0.1)", "munkres", "unicodedata2 (>=14.0.0)", "xattr"] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=14.0.0)", "xattr", "zopfli (>=0.1.4)"] graphite = ["lz4 (>=1.7.4.2)"] 
-interpolatable = ["scipy", "munkres"] +interpolatable = ["munkres", "scipy"] lxml = ["lxml (>=4.0,<5)"] pathops = ["skia-pathops (>=0.5.0)"] plot = ["matplotlib"] @@ -337,7 +345,15 @@ symfont = ["sympy"] type1 = ["xattr"] ufo = ["fs (>=2.2.0,<3)"] unicode = ["unicodedata2 (>=14.0.0)"] -woff = ["zopfli (>=0.1.4)", "brotlicffi (>=0.8.0)", "brotli (>=1.0.1)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "future" +version = "0.18.2" +description = "Clean single-source support for Python 3 and 2" +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "graphviz" @@ -348,21 +364,40 @@ optional = false python-versions = ">=3.7" [package.extras] -dev = ["tox (>=3)", "flake8", "pep8-naming", "wheel", "twine"] +dev = ["flake8", "pep8-naming", "tox (>=3)", "twine", "wheel"] docs = ["sphinx (>=5)", "sphinx-autodoc-typehints", "sphinx-rtd-theme"] -test = ["pytest (>=7)", "pytest-mock (>=3)", "mock (>=4)", "pytest-cov", "coverage"] +test = ["coverage", "mock (>=4)", "pytest (>=7)", "pytest-cov", "pytest-mock (>=3)"] [[package]] name = "greenlet" -version = "1.1.2" +version = "1.1.3" description = "Lightweight in-process concurrent programming" category = "main" -optional = true +optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" [package.extras] docs = ["sphinx"] +[[package]] +name = "h5py" +version = "3.7.0" +description = "Read and write HDF5 files from Python" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +numpy = ">=1.14.5" + +[[package]] +name = "h5py-cache" +version = "1.0" +description = "Create h5py File object with specified cache" +category = "main" +optional = false +python-versions = "*" + [[package]] name = "identify" version = "2.5.3" @@ -379,7 +414,7 @@ name = "idna" version = "3.3" description = "Internationalized Domain Names in Applications (IDNA)" category = "main" -optional = true +optional = false python-versions = ">=3.5" [[package]] @@ -402,9 +437,9 @@ python-versions = ">=3.7" zipp = ">=0.5" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +docs = ["jaraco.packaging (>=9)", "rst.linker (>=1.9)", "sphinx"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] [[package]] name = "importlib-resources" @@ -418,8 +453,8 @@ python-versions = ">=3.7" zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] -docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "jaraco.tidelift (>=1.4)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"] +docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [[package]] name = "iniconfig" @@ -431,7 +466,7 @@ 
python-versions = "*" [[package]] name = "ipykernel" -version = "6.15.1" +version = "6.15.2" description = "IPython Kernel for Jupyter" category = "main" optional = true @@ -451,7 +486,7 @@ tornado = ">=6.1" traitlets = ">=5.1.0" [package.extras] -test = ["flaky", "ipyparallel", "pre-commit", "pytest-cov", "pytest-timeout", "pytest (>=6.0)"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=6.0)", "pytest-cov", "pytest-timeout"] [[package]] name = "ipython" @@ -476,7 +511,7 @@ stack-data = "*" traitlets = ">=5" [package.extras] -all = ["black", "Sphinx (>=1.3)", "ipykernel", "nbconvert", "nbformat", "ipywidgets", "notebook", "ipyparallel", "qtconsole", "pytest (<7.1)", "pytest-asyncio", "testpath", "curio", "matplotlib (!=3.2.0)", "numpy (>=1.19)", "pandas", "trio"] +all = ["Sphinx (>=1.3)", "black", "curio", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.19)", "pandas", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "testpath", "trio"] black = ["black"] doc = ["Sphinx (>=1.3)"] kernel = ["ipykernel"] @@ -486,7 +521,15 @@ notebook = ["ipywidgets", "notebook"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] -test_extra = ["pytest (<7.1)", "pytest-asyncio", "testpath", "curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.19)", "pandas", "trio"] +test_extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.19)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] + +[[package]] +name = "iso8601" +version = "1.0.2" +description = "Simple module to parse ISO 8601 dates" +category = "main" +optional = false +python-versions = ">=3.6.2,<4.0" [[package]] name = "isort" @@ -500,10 +543,10 @@ python-versions = ">=3.6.1,<4.0" colorama = {version = ">=0.4.3,<0.5.0", optional = true, markers = "extra == \"colors\""} [package.extras] -pipfile_deprecated_finder = ["pipreqs", "requirementslib"] -requirements_deprecated_finder = ["pipreqs", "pip-api"] colors = ["colorama (>=0.4.3,<0.5.0)"] +pipfile_deprecated_finder = ["pipreqs", "requirementslib"] plugins = ["setuptools"] +requirements_deprecated_finder = ["pip-api", "pipreqs"] [[package]] name = "jedi" @@ -551,13 +594,13 @@ optional = false python-versions = ">=2.7" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=3.5,!=3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-black-multipy", "pytest-cov", "ecdsa", "feedparser", "numpy", "pandas", "pymongo", "scikit-learn", "sqlalchemy", "pytest-flake8 (<1.1.0)", "enum34", "jsonlib", "pytest-flake8 (>=1.1.1)"] +docs = ["jaraco.packaging (>=3.2)", "rst.linker (>=1.9)", "sphinx"] +testing = ["ecdsa", "enum34", "feedparser", "jsonlib", "numpy", "pandas", "pymongo", "pytest (>=3.5,!=3.7.3)", "pytest-black-multipy", "pytest-checkdocs (>=1.2.3)", "pytest-cov", "pytest-flake8 (<1.1.0)", "pytest-flake8 (>=1.1.1)", "scikit-learn", "sqlalchemy"] "testing.libs" = ["simplejson", "ujson", "yajl"] [[package]] name = "jsonschema" -version = "4.14.0" +version = "4.15.0" description = "An implementation of JSON Schema validation for Python" category = "main" optional = true @@ -570,8 +613,8 @@ pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\ pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" [package.extras] -format-nongpl = ["webcolors (>=1.11)", "uri-template", "rfc3986-validator (>0.1.0)", "rfc3339-validator", "jsonpointer (>1.13)", "isoduration", 
"idna", "fqdn"] -format = ["webcolors (>=1.11)", "uri-template", "rfc3987", "rfc3339-validator", "jsonpointer (>1.13)", "isoduration", "idna", "fqdn"] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] [[package]] name = "jupyter-cache" @@ -594,12 +637,12 @@ tabulate = "*" [package.extras] cli = ["click-log"] code_style = ["pre-commit (>=2.12,<3.0)"] -rtd = ["nbdime", "jupytext", "myst-nb (>=0.12.3,<0.13.0)", "sphinx-book-theme (>=0.1.1,<0.2.0)", "sphinx-copybutton"] -testing = ["nbdime", "coverage", "ipykernel", "jupytext", "matplotlib", "nbformat (>=5.1)", "numpy", "pandas", "pytest (>=6,<7)", "pytest-cov", "pytest-regressions", "sympy"] +rtd = ["jupytext", "myst-nb (>=0.12.3,<0.13.0)", "nbdime", "sphinx-book-theme (>=0.1.1,<0.2.0)", "sphinx-copybutton"] +testing = ["coverage", "ipykernel", "jupytext", "matplotlib", "nbdime", "nbformat (>=5.1)", "numpy", "pandas", "pytest (>=6,<7)", "pytest-cov", "pytest-regressions", "sympy"] [[package]] name = "jupyter-client" -version = "7.3.4" +version = "7.3.5" description = "Jupyter protocol implementation and client libraries" category = "main" optional = true @@ -611,11 +654,11 @@ jupyter-core = ">=4.9.2" nest-asyncio = ">=1.5.4" python-dateutil = ">=2.8.2" pyzmq = ">=23.0" -tornado = ">=6.0" +tornado = ">=6.2" traitlets = "*" [package.extras] -doc = ["ipykernel", "myst-parser", "sphinx-rtd-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt"] +doc = ["ipykernel", "myst-parser", "sphinx (>=1.3.6)", "sphinx-rtd-theme", "sphinxcontrib-github-alt"] test = ["codecov", "coverage", "ipykernel (>=6.5)", "ipython", "mypy", "pre-commit", "pytest", "pytest-asyncio (>=0.18)", "pytest-cov", "pytest-timeout"] [[package]] @@ -649,12 +692,26 @@ category = "dev" optional = false python-versions = ">=3.6" +[[package]] +name = "lxml" +version = "4.9.1" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" + +[package.extras] +cssselect = ["cssselect (>=0.7)"] +html5 = ["html5lib"] +htmlsoup = ["beautifulsoup4"] +source = ["Cython (>=0.29.7)"] + [[package]] name = "lyon" version = "1.0.0" description = "" category = "main" -optional = false +optional = true python-versions = "*" develop = false @@ -685,7 +742,7 @@ compare = ["commonmark (>=0.9.1,<0.10.0)", "markdown (>=3.3.6,<3.4.0)", "mistlet linkify = ["linkify-it-py (>=1.0,<2.0)"] plugins = ["mdit-py-plugins"] profiling = ["gprof2dot"] -rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx-book-theme"] +rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-book-theme", "sphinx-copybutton", "sphinx-design"] testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] [[package]] @@ -728,11 +785,11 @@ traitlets = "*" [[package]] name = "mccabe" -version = "0.6.1" +version = "0.7.0" description = "McCabe checker, plugin for flake8" category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.6" [[package]] name = "mdit-py-plugins" @@ -809,7 +866,7 @@ typing-extensions = "*" [package.extras] code_style = ["pre-commit (>=2.12,<3.0)"] rtd = ["alabaster", "altair", "bokeh", "coconut (>=1.4.3,<1.5.0)", "ipykernel (>=5.5,<6.0)", "ipywidgets", "jupytext (>=1.11.2,<1.12.0)", "matplotlib", "numpy", "pandas", "plotly", "sphinx-book-theme (>=0.3.0,<0.4.0)", "sphinx-copybutton", "sphinx-design (>=0.1.0,<0.2.0)", "sphinxcontrib-bibtex", "sympy"] -testing = ["coverage (<5.0)", "beautifulsoup4", "ipykernel (>=5.5,<6.0)", "ipython (!=8.1.0)", "ipywidgets", "jupytext (>=1.11.2,<1.12.0)", "matplotlib (>=3.3.0,<3.4.0)", "nbdime", "numpy", "pandas (<1.4)", "pytest (>=5.4,<6.0)", "pytest-cov (>=2.8,<3.0)", "pytest-regressions", "pytest-param-files (>=0.3.3,<0.4.0)", "sympy"] +testing = ["beautifulsoup4", "coverage (<5.0)", "ipykernel (>=5.5,<6.0)", "ipython (!=8.1.0)", "ipywidgets", "jupytext (>=1.11.2,<1.12.0)", "matplotlib (>=3.3.0,<3.4.0)", "nbdime", "numpy", "pandas (<1.4)", "pytest (>=5.4,<6.0)", "pytest-cov (>=2.8,<3.0)", "pytest-param-files (>=0.3.3,<0.4.0)", "pytest-regressions", "sympy"] [[package]] name = "myst-parser" @@ -831,8 +888,8 @@ typing-extensions = "*" [package.extras] code_style = ["pre-commit (>=2.12,<3.0)"] linkify = ["linkify-it-py (>=1.0,<2.0)"] -rtd = ["ipython", "sphinx-book-theme", "sphinx-panels", "sphinxcontrib-bibtex (>=2.4,<3.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)", "sphinxcontrib.mermaid (>=0.7.1,<0.8.0)", "sphinxext-opengraph (>=0.6.3,<0.7.0)"] -testing = ["beautifulsoup4", "coverage", "docutils (>=0.17.0,<0.18.0)", "pytest (>=6,<7)", "pytest-cov", "pytest-regressions", "pytest-param-files (>=0.3.4,<0.4.0)"] +rtd = ["ipython", "sphinx-book-theme", "sphinx-panels", "sphinxcontrib-bibtex (>=2.4,<3.0)", "sphinxcontrib.mermaid (>=0.7.1,<0.8.0)", "sphinxext-opengraph (>=0.6.3,<0.7.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"] +testing = ["beautifulsoup4", "coverage", "docutils (>=0.17.0,<0.18.0)", "pytest (>=6,<7)", "pytest-cov", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions"] [[package]] name = "nbclient" @@ -849,8 +906,8 @@ nest-asyncio = "*" traitlets = ">=5.0.0" [package.extras] -sphinx = ["Sphinx (>=1.7)", "sphinx-book-theme", "mock", "moto", "myst-parser"] -test = ["ipython (<8.0.0)", "ipykernel", "ipywidgets (<8.0.0)", "pytest (>=4.1)", "pytest-asyncio", "pytest-cov (>=2.6.1)", "check-manifest", "flake8", "mypy", "xmltodict", 
"black", "pip (>=18.1)", "wheel (>=0.31.0)", "setuptools (>=38.6.0)", "twine (>=1.11.0)"] +sphinx = ["Sphinx (>=1.7)", "mock", "moto", "myst-parser", "sphinx-book-theme"] +test = ["black", "check-manifest", "flake8", "ipykernel", "ipython (<8.0.0)", "ipywidgets (<8.0.0)", "mypy", "pip (>=18.1)", "pytest (>=4.1)", "pytest-asyncio", "pytest-cov (>=2.6.1)", "setuptools (>=38.6.0)", "twine (>=1.11.0)", "wheel (>=0.31.0)", "xmltodict"] [[package]] name = "nbformat" @@ -867,7 +924,7 @@ jupyter-core = "*" traitlets = ">=5.1" [package.extras] -test = ["check-manifest", "testpath", "pytest", "pre-commit"] +test = ["check-manifest", "pre-commit", "pytest", "testpath"] [[package]] name = "neo" @@ -883,7 +940,7 @@ quantities = ">=0.12.1" [package.extras] igorproio = ["igor"] -kwikio = ["scipy", "klusta"] +kwikio = ["klusta", "scipy"] neomatlabio = ["scipy (>=1.0.0)"] nixio = ["nixio (>=1.5.0)"] stimfitio = ["stfio"] @@ -906,11 +963,11 @@ optional = false python-versions = ">=3.8" [package.extras] -default = ["numpy (>=1.19)", "scipy (>=1.8)", "matplotlib (>=3.4)", "pandas (>=1.3)"] -developer = ["pre-commit (>=2.20)", "mypy (>=0.961)"] -doc = ["sphinx (>=5)", "pydata-sphinx-theme (>=0.9)", "sphinx-gallery (>=0.10)", "numpydoc (>=1.4)", "pillow (>=9.1)", "nb2plots (>=0.6)", "texext (>=0.6.6)"] -extra = ["lxml (>=4.6)", "pygraphviz (>=1.9)", "pydot (>=1.4.2)", "sympy (>=1.10)"] -test = ["pytest (>=7.1)", "pytest-cov (>=3.0)", "codecov (>=2.1)"] +default = ["matplotlib (>=3.4)", "numpy (>=1.19)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=0.961)", "pre-commit (>=2.20)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.4)", "pillow (>=9.1)", "pydata-sphinx-theme (>=0.9)", "sphinx (>=5)", "sphinx-gallery (>=0.10)", "texext (>=0.6.6)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.9)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.1)", "pytest-cov (>=3.0)"] [[package]] name = "nodeenv" @@ -920,6 +977,18 @@ category = "dev" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +[[package]] +name = "numexpr" +version = "2.8.3" +description = "Fast numerical expression evaluator for NumPy" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +numpy = ">=1.13.3" +packaging = "*" + [[package]] name = "numpy" version = "1.23.2" @@ -941,7 +1010,31 @@ Jinja2 = ">=2.10" sphinx = ">=3.0" [package.extras] -testing = ["pytest", "pytest-cov", "matplotlib"] +testing = ["matplotlib", "pytest", "pytest-cov"] + +[[package]] +name = "obspy" +version = "1.3.0" +description = "ObsPy - a Python framework for seismological observatories." 
+category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +decorator = "*" +lxml = "*" +matplotlib = ">=3.2.0" +numpy = ">=1.15.0" +requests = "*" +scipy = ">=1.0.0" +sqlalchemy = "*" + +[package.extras] +all = ["cartopy", "geographiclib", "packaging", "pyproj", "pyshp", "pytest", "pytest-json-report"] +geo = ["geographiclib"] +imaging = ["cartopy"] +"io.shapefile" = ["pyshp"] +tests = ["packaging", "pyproj", "pytest", "pytest-json-report"] [[package]] name = "packaging" @@ -956,7 +1049,7 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "pandas" -version = "1.4.3" +version = "1.4.4" description = "Powerful data structures for data analysis, time series, and statistics" category = "main" optional = false @@ -989,11 +1082,11 @@ testing = ["docopt", "pytest (<6.0.0)"] [[package]] name = "pathspec" -version = "0.9.0" +version = "0.10.1" description = "Utility library for gitignore style pattern matching of file paths." category = "dev" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +python-versions = ">=3.7" [[package]] name = "pexpect" @@ -1043,8 +1136,8 @@ optional = false python-versions = ">=3.7" [package.extras] -docs = ["furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)", "sphinx (>=4)"] -test = ["appdirs (==1.4.4)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)", "pytest (>=6)"] +docs = ["furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx (>=4)", "sphinx-autodoc-typehints (>=1.12)"] +test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"] [[package]] name = "pluggy" @@ -1089,7 +1182,7 @@ virtualenv = ">=20.0.8" [[package]] name = "prompt-toolkit" -version = "3.0.30" +version = "3.0.31" description = "Library for building powerful interactive command lines in Python" category = "main" optional = false @@ -1107,7 +1200,7 @@ optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [package.extras] -test = ["ipaddress", "mock", "enum34", "pywin32", "wmi"] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "ptyprocess" @@ -1138,11 +1231,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "pycodestyle" -version = "2.8.0" +version = "2.9.1" description = "Python style guide checker" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [[package]] name = "pycparser" @@ -1154,7 +1247,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "pydata-sphinx-theme" -version = "0.8.1" +version = "0.9.0" description = "Bootstrap-based Sphinx theme from the PyData community" category = "main" optional = true @@ -1164,13 +1257,13 @@ python-versions = ">=3.7" beautifulsoup4 = "*" docutils = "!=0.17.0" packaging = "*" -sphinx = ">=3.5.4,<5" +sphinx = ">=4.0.2" [package.extras] -doc = ["numpydoc", "myst-parser", "pandas", "pytest", "pytest-regressions", "sphinxext-rediraffe", "sphinx-sitemap", "jupyter-sphinx", "plotly", "numpy", "xarray"] -test = ["pytest", "pydata-sphinx-theme"] -coverage = ["pytest-cov", "codecov", "pydata-sphinx-theme"] -dev = ["pyyaml", "pre-commit", "nox", "pydata-sphinx-theme"] +coverage = ["codecov", "pydata-sphinx-theme", "pytest-cov"] +dev = ["nox", "pre-commit", "pydata-sphinx-theme", "pyyaml"] +doc = ["jupyter-sphinx", "myst-parser", "numpy", "numpydoc", "pandas", "plotly", "pytest", "pytest-regressions", "sphinx-design", 
"sphinx-sitemap", "sphinxext-rediraffe", "xarray"] +test = ["pydata-sphinx-theme", "pytest"] [[package]] name = "pydocstyle" @@ -1186,13 +1279,24 @@ snowballstemmer = "*" [package.extras] toml = ["toml"] +[[package]] +name = "pyedflib" +version = "0.1.30" +description = "library to read/write EDF+/BDF+ files" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +numpy = ">=1.9.1" + [[package]] name = "pyflakes" -version = "2.4.0" +version = "2.5.0" description = "passive checker of Python programs" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.6" [[package]] name = "pygments" @@ -1218,14 +1322,14 @@ numpy = "*" [[package]] name = "pylint" -version = "2.14.5" +version = "2.15.0" description = "python code static checker" category = "dev" optional = false python-versions = ">=3.7.2" [package.dependencies] -astroid = ">=2.11.6,<=2.12.0-dev0" +astroid = ">=2.12.4,<=2.14.0-dev0" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = ">=0.2" isort = ">=4.2.5,<6" @@ -1248,7 +1352,7 @@ optional = false python-versions = ">=3.6.8" [package.extras] -diagrams = ["railroad-diagrams", "jinja2"] +diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pyrsistent" @@ -1258,16 +1362,35 @@ category = "main" optional = true python-versions = ">=3.7" +[[package]] +name = "pyseries" +version = "1.0.26" +description = "pySeries is a package for statistical analysis of time-series data." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +deepdish = "*" +matplotlib = "*" +numpy = "*" +obspy = "*" +pandas = "*" +pyedflib = "*" +scipy = "*" +seaborn = "*" +sklearn = "*" +tabulate = "*" + [[package]] name = "pytest" -version = "7.1.2" +version = "7.1.3" description = "pytest: simple powerful testing with Python" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] -atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} iniconfig = "*" @@ -1292,7 +1415,7 @@ coverage = {version = ">=5.2.1", extras = ["toml"]} pytest = ">=4.6" [package.extras] -testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtualenv"] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] [[package]] name = "pytest-html" @@ -1421,7 +1544,7 @@ name = "requests" version = "2.28.1" description = "Python HTTP for Humans." 
category = "main" -optional = true +optional = false python-versions = ">=3.7, <4" [package.dependencies] @@ -1449,14 +1572,14 @@ scipy = ">=1.3.2" threadpoolctl = ">=2.0.0" [package.extras] -tests = ["numpydoc (>=1.2.0)", "pyamg (>=4.0.0)", "mypy (>=0.961)", "black (>=22.3.0)", "flake8 (>=3.8.2)", "pytest-cov (>=2.9.0)", "pytest (>=5.0.1)", "pandas (>=1.0.5)", "scikit-image (>=0.16.2)", "matplotlib (>=3.1.2)"] -examples = ["seaborn (>=0.9.0)", "pandas (>=1.0.5)", "scikit-image (>=0.16.2)", "matplotlib (>=3.1.2)"] -docs = ["sphinxext-opengraph (>=0.4.2)", "sphinx-prompt (>=1.3.0)", "Pillow (>=7.1.2)", "numpydoc (>=1.2.0)", "sphinx-gallery (>=0.7.0)", "sphinx (>=4.0.1)", "memory-profiler (>=0.57.0)", "seaborn (>=0.9.0)", "pandas (>=1.0.5)", "scikit-image (>=0.16.2)", "matplotlib (>=3.1.2)"] -benchmark = ["memory-profiler (>=0.57.0)", "pandas (>=1.0.5)", "matplotlib (>=3.1.2)"] +benchmark = ["matplotlib (>=3.1.2)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.2)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.1.2)", "pandas (>=1.0.5)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=22.3.0)", "flake8 (>=3.8.2)", "matplotlib (>=3.1.2)", "mypy (>=0.961)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pyamg (>=4.0.0)", "pytest (>=5.0.1)", "pytest-cov (>=2.9.0)", "scikit-image (>=0.16.2)"] [[package]] name = "scipy" -version = "1.9.0" +version = "1.9.1" description = "SciPy: Scientific Library for Python" category = "main" optional = false @@ -1479,6 +1602,19 @@ numpy = ">=1.15" pandas = ">=0.23" scipy = ">=1.0" +[[package]] +name = "serial" +version = "0.0.97" +description = "A framework for serializing/deserializing JSON/YAML/XML into python class instances and vice versa" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +future = ">=0.17.1" +iso8601 = ">=0.1.12" +pyyaml = ">=3.13" + [[package]] name = "setuptools-scm" version = "6.4.2" @@ -1503,6 +1639,17 @@ category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +[[package]] +name = "sklearn" +version = "0.0" +description = "A set of python modules for machine learning and data mining" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +scikit-learn = "*" + [[package]] name = "snowballstemmer" version = "2.2.0" @@ -1549,8 +1696,8 @@ sphinxcontrib-websupport = {version = "*", optional = true, markers = "extra == [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.931)", "docutils-stubs", "types-typed-ast", "types-requests"] -test = ["pytest", "pytest-cov", "html5lib", "cython", "typed-ast"] +lint = ["docutils-stubs", "flake8 (>=3.5.0)", "isort", "mypy (>=0.931)", "types-requests", "types-typed-ast"] +test = ["cython", "html5lib", "pytest", "pytest-cov", "typed-ast"] [[package]] name = "sphinx-autodoc-typehints" @@ -1567,24 +1714,6 @@ Sphinx = ">=4.5" testing = ["covdefaults (>=2.2)", "coverage (>=6.3)", "diff-cover (>=6.4)", "nptyping (>=2.1.2)", "pytest (>=7.1)", "pytest-cov (>=3)", "sphobjinv (>=2)", "typing-extensions (>=4.1)"] type_comments = ["typed-ast (>=1.5.2)"] -[[package]] -name = "sphinx-book-theme" -version = "0.3.3" -description = "A clean book theme for scientific explanations and documentation with 
Sphinx" -category = "main" -optional = true -python-versions = ">=3.7" - -[package.dependencies] -pydata-sphinx-theme = ">=0.8.0,<0.9.0" -pyyaml = "*" -sphinx = ">=3,<5" - -[package.extras] -code_style = ["pre-commit (>=2.7.0,<2.8.0)"] -doc = ["ablog (>=0.10.13,<0.11.0)", "ipywidgets", "folium", "numpy", "matplotlib", "numpydoc", "myst-nb (>=0.13.2,<0.14.0)", "nbclient", "pandas", "plotly", "sphinx (>=4.0,<5.0)", "sphinx-design", "sphinx-examples", "sphinx-copybutton", "sphinx-tabs", "sphinx-togglebutton (>=0.2.1)", "sphinx-thebe (>=0.1.1)", "sphinxcontrib-bibtex (>=2.2,<3.0)", "sphinxcontrib-youtube", "sphinxext-opengraph"] -test = ["beautifulsoup4 (>=4.6.1,<5)", "coverage", "myst-nb (>=0.13.2,<0.14.0)", "pytest (>=6.0.1,<6.1.0)", "pytest-cov", "pytest-regressions (>=2.0.1,<2.1.0)", "sphinx-thebe"] - [[package]] name = "sphinx-copybutton" version = "0.5.0" @@ -1598,22 +1727,7 @@ sphinx = ">=1.8" [package.extras] code_style = ["pre-commit (==2.12.1)"] -rtd = ["sphinx", "ipython", "myst-nb", "sphinx-book-theme"] - -[[package]] -name = "sphinx-rtd-theme" -version = "1.0.0" -description = "Read the Docs theme for Sphinx" -category = "main" -optional = true -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" - -[package.dependencies] -docutils = "<0.18" -sphinx = ">=1.6" - -[package.extras] -dev = ["transifex-client", "sphinxcontrib-httpdomain", "bump2version"] +rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme"] [[package]] name = "sphinx-togglebutton" @@ -1628,7 +1742,7 @@ docutils = "*" sphinx = "*" [package.extras] -sphinx = ["matplotlib", "numpy", "myst-nb", "sphinx-book-theme", "sphinx-design", "sphinx-examples"] +sphinx = ["matplotlib", "myst-nb", "numpy", "sphinx-book-theme", "sphinx-design", "sphinx-examples"] [[package]] name = "sphinxcontrib-applehelp" @@ -1639,7 +1753,7 @@ optional = true python-versions = ">=3.5" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] [[package]] @@ -1651,7 +1765,7 @@ optional = true python-versions = ">=3.5" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] [[package]] @@ -1663,8 +1777,8 @@ optional = true python-versions = ">=3.6" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest", "html5lib"] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["html5lib", "pytest"] [[package]] name = "sphinxcontrib-jsmath" @@ -1675,7 +1789,15 @@ optional = true python-versions = ">=3.5" [package.extras] -test = ["pytest", "flake8", "mypy"] +test = ["flake8", "mypy", "pytest"] + +[[package]] +name = "sphinxcontrib-mermaid" +version = "0.7.1" +description = "Mermaid diagrams in yours Sphinx powered docs" +category = "main" +optional = false +python-versions = "*" [[package]] name = "sphinxcontrib-qthelp" @@ -1686,7 +1808,7 @@ optional = true python-versions = ">=3.5" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] [[package]] @@ -1698,7 +1820,7 @@ optional = true python-versions = ">=3.5" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] [[package]] @@ -1714,43 +1836,43 @@ sphinxcontrib-serializinghtml = "*" [package.extras] lint = ["flake8"] -test = ["pytest", "sqlalchemy", "whoosh", "sphinx"] +test = ["pytest", "sphinx", "sqlalchemy", "whoosh"] [[package]] name = "sqlalchemy" version = "1.4.40" description = "Database 
Abstraction Library" category = "main" -optional = true +optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" [package.dependencies] greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} [package.extras] -aiomysql = ["greenlet (!=0.4.17)", "aiomysql"] -aiosqlite = ["typing_extensions (!=3.10.0.1)", "greenlet (!=0.4.17)", "aiosqlite"] +aiomysql = ["aiomysql", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["greenlet (!=0.4.17)", "asyncmy (>=0.2.3,!=0.2.4)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] mariadb_connector = ["mariadb (>=1.0.1,!=1.1.2)"] mssql = ["pyodbc"] mssql_pymssql = ["pymssql"] mssql_pyodbc = ["pyodbc"] -mypy = ["sqlalchemy2-stubs", "mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0,<2)", "mysqlclient (>=1.4.0)"] +mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] +mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] mysql_connector = ["mysql-connector-python"] -oracle = ["cx_oracle (>=7,<8)", "cx_oracle (>=7)"] +oracle = ["cx_oracle (>=7)", "cx_oracle (>=7,<8)"] postgresql = ["psycopg2 (>=2.7)"] -postgresql_asyncpg = ["greenlet (!=0.4.17)", "asyncpg"] +postgresql_asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] postgresql_pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] postgresql_psycopg2binary = ["psycopg2-binary"] postgresql_psycopg2cffi = ["psycopg2cffi"] -pymysql = ["pymysql (<1)", "pymysql"] +pymysql = ["pymysql", "pymysql (<1)"] sqlcipher = ["sqlcipher3-binary"] [[package]] name = "stack-data" -version = "0.4.0" +version = "0.5.0" description = "Extract data from python stack frames and tracebacks for informative displays" category = "main" optional = false @@ -1762,14 +1884,30 @@ executing = "*" pure-eval = "*" [package.extras] -tests = ["cython", "littleutils", "pygments", "typeguard", "pytest"] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "tables" +version = "3.7.0" +description = "Hierarchical datasets for Python" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +numexpr = ">=2.6.2" +numpy = ">=1.19.0" +packaging = "*" + +[package.extras] +doc = ["ipython", "numpydoc", "sphinx (>=1.1)", "sphinx-rtd-theme"] [[package]] name = "tabulate" version = "0.8.10" description = "Pretty-print tabular data" category = "main" -optional = true +optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [package.extras] @@ -1863,10 +2001,10 @@ python-versions = ">=3.6" click = ">=7.1.1,<9.0.0" [package.extras] -test = ["isort (>=5.0.6,<6.0.0)", "black (>=22.3.0,<23.0.0)", "mypy (==0.910)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<2.0.0)", "coverage (>=5.2,<6.0)", "pytest-cov (>=2.10.0,<3.0.0)", "pytest (>=4.4.0,<5.4.0)", "shellingham (>=1.3.0,<2.0.0)"] -doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "mkdocs (>=1.1.2,<2.0.0)"] -dev = ["pre-commit (>=2.17.0,<3.0.0)", "flake8 (>=3.8.3,<4.0.0)", "autoflake (>=1.3.1,<2.0.0)"] -all = ["shellingham (>=1.3.0,<2.0.0)", "colorama (>=0.4.3,<0.5.0)"] +all = ["colorama (>=0.4.3,<0.5.0)", "shellingham (>=1.3.0,<2.0.0)"] +dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 
(>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] +doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)"] +test = ["black (>=22.3.0,<23.0.0)", "coverage (>=5.2,<6.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<5.4.0)", "pytest-cov (>=2.10.0,<3.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<2.0.0)", "shellingham (>=1.3.0,<2.0.0)"] [[package]] name = "typing-extensions" @@ -1881,17 +2019,17 @@ name = "urllib3" version = "1.26.12" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" -optional = true +optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4" [package.extras] -brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"] -secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "urllib3-secure-extra", "ipaddress"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "virtualenv" -version = "20.16.3" +version = "20.16.4" description = "Virtual Python Environment builder" category = "dev" optional = false @@ -1947,16 +2085,17 @@ optional = true python-versions = ">=3.7" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "jaraco.tidelift (>=1.4)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.3)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"] +docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"] +testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [extras] -docs = ["Sphinx", "sphinx-rtd-theme", "sphinx-book-theme", "readthedocs-sphinx-search", "sphinx-autodoc-typehints", "myst-parser", "myst-nb", "numpydoc", "sphinx-togglebutton", "sphinx-copybutton"] +docs = ["Sphinx", "pydata-sphinx-theme", "readthedocs-sphinx-search", "sphinx-autodoc-typehints", "myst-parser", "myst-nb", "numpydoc", "sphinx-togglebutton", "sphinx-copybutton", "sphinxcontrib-mermaid"] +experiment = ["lyon", "pyseries"] [metadata] lock-version = "1.1" python-versions = ">=3.8,<3.11" -content-hash = "57d22977debd3a434298f3b1ecea19b196bf65d7d79142d3d7f20e625a506c62" +content-hash = "21368950a654049508fa97c635a771ad68e5d389cf4b1de5eed6555996713db1" [metadata.files] alabaster = [ @@ -1967,9 +2106,11 @@ appnope = [ {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, ] -astroid = [] +astroid = [ + {file = "astroid-2.12.5-py3-none-any.whl", hash = "sha256:d612609242996c4365aeb0345e61edba34363eaaba55f1c0addf6a98f073bef6"}, + {file = "astroid-2.12.5.tar.gz", hash = "sha256:396c88d0a58d7f8daadf730b2ce90838bf338c6752558db719ec6f99c18ec20e"}, +] asttokens = [] -atomicwrites = [] attrs = [] babel = [ {file = "Babel-2.10.3-py3-none-any.whl", hash = "sha256:ff56f4892c1c4bf0d814575ea23471c230d544203c7748e8c68f0089478d48eb"}, @@ -1984,29 +2125,29 @@ beautifulsoup4 = [ {file = 
"beautifulsoup4-4.11.1.tar.gz", hash = "sha256:ad9aa55b65ef2808eb405f46cf74df7fcb7044d5cbc26487f96eb2ef2e436693"}, ] black = [ - {file = "black-22.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f586c26118bc6e714ec58c09df0157fe2d9ee195c764f630eb0d8e7ccce72e69"}, - {file = "black-22.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b270a168d69edb8b7ed32c193ef10fd27844e5c60852039599f9184460ce0807"}, - {file = "black-22.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6797f58943fceb1c461fb572edbe828d811e719c24e03375fd25170ada53825e"}, - {file = "black-22.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c85928b9d5f83b23cee7d0efcb310172412fbf7cb9d9ce963bd67fd141781def"}, - {file = "black-22.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6fe02afde060bbeef044af7996f335fbe90b039ccf3f5eb8f16df8b20f77666"}, - {file = "black-22.6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cfaf3895a9634e882bf9d2363fed5af8888802d670f58b279b0bece00e9a872d"}, - {file = "black-22.6.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94783f636bca89f11eb5d50437e8e17fbc6a929a628d82304c80fa9cd945f256"}, - {file = "black-22.6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2ea29072e954a4d55a2ff58971b83365eba5d3d357352a07a7a4df0d95f51c78"}, - {file = "black-22.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e439798f819d49ba1c0bd9664427a05aab79bfba777a6db94fd4e56fae0cb849"}, - {file = "black-22.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:187d96c5e713f441a5829e77120c269b6514418f4513a390b0499b0987f2ff1c"}, - {file = "black-22.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:074458dc2f6e0d3dab7928d4417bb6957bb834434516f21514138437accdbe90"}, - {file = "black-22.6.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a218d7e5856f91d20f04e931b6f16d15356db1c846ee55f01bac297a705ca24f"}, - {file = "black-22.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:568ac3c465b1c8b34b61cd7a4e349e93f91abf0f9371eda1cf87194663ab684e"}, - {file = "black-22.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6c1734ab264b8f7929cef8ae5f900b85d579e6cbfde09d7387da8f04771b51c6"}, - {file = "black-22.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9a3ac16efe9ec7d7381ddebcc022119794872abce99475345c5a61aa18c45ad"}, - {file = "black-22.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:b9fd45787ba8aa3f5e0a0a98920c1012c884622c6c920dbe98dbd05bc7c70fbf"}, - {file = "black-22.6.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7ba9be198ecca5031cd78745780d65a3f75a34b2ff9be5837045dce55db83d1c"}, - {file = "black-22.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a3db5b6409b96d9bd543323b23ef32a1a2b06416d525d27e0f67e74f1446c8f2"}, - {file = "black-22.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:560558527e52ce8afba936fcce93a7411ab40c7d5fe8c2463e279e843c0328ee"}, - {file = "black-22.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b154e6bbde1e79ea3260c4b40c0b7b3109ffcdf7bc4ebf8859169a6af72cd70b"}, - {file = "black-22.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:4af5bc0e1f96be5ae9bd7aaec219c901a94d6caa2484c21983d043371c733fc4"}, - {file = "black-22.6.0-py3-none-any.whl", hash = "sha256:ac609cf8ef5e7115ddd07d85d988d074ed00e10fbc3445aee393e70164a2219c"}, - {file = "black-22.6.0.tar.gz", hash = "sha256:6c6d39e28aed379aec40da1c65434c77d75e65bb59a1e1c283de545fb4e7c6c9"}, + {file = "black-22.8.0-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:ce957f1d6b78a8a231b18e0dd2d94a33d2ba738cd88a7fe64f53f659eea49fdd"}, + {file = "black-22.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5107ea36b2b61917956d018bd25129baf9ad1125e39324a9b18248d362156a27"}, + {file = "black-22.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8166b7bfe5dcb56d325385bd1d1e0f635f24aae14b3ae437102dedc0c186747"}, + {file = "black-22.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd82842bb272297503cbec1a2600b6bfb338dae017186f8f215c8958f8acf869"}, + {file = "black-22.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d839150f61d09e7217f52917259831fe2b689f5c8e5e32611736351b89bb2a90"}, + {file = "black-22.8.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a05da0430bd5ced89176db098567973be52ce175a55677436a271102d7eaa3fe"}, + {file = "black-22.8.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a098a69a02596e1f2a58a2a1c8d5a05d5a74461af552b371e82f9fa4ada8342"}, + {file = "black-22.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:5594efbdc35426e35a7defa1ea1a1cb97c7dbd34c0e49af7fb593a36bd45edab"}, + {file = "black-22.8.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a983526af1bea1e4cf6768e649990f28ee4f4137266921c2c3cee8116ae42ec3"}, + {file = "black-22.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b2c25f8dea5e8444bdc6788a2f543e1fb01494e144480bc17f806178378005e"}, + {file = "black-22.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:78dd85caaab7c3153054756b9fe8c611efa63d9e7aecfa33e533060cb14b6d16"}, + {file = "black-22.8.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:cea1b2542d4e2c02c332e83150e41e3ca80dc0fb8de20df3c5e98e242156222c"}, + {file = "black-22.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5b879eb439094751185d1cfdca43023bc6786bd3c60372462b6f051efa6281a5"}, + {file = "black-22.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a12e4e1353819af41df998b02c6742643cfef58282915f781d0e4dd7a200411"}, + {file = "black-22.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3a73f66b6d5ba7288cd5d6dad9b4c9b43f4e8a4b789a94bf5abfb878c663eb3"}, + {file = "black-22.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:e981e20ec152dfb3e77418fb616077937378b322d7b26aa1ff87717fb18b4875"}, + {file = "black-22.8.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8ce13ffed7e66dda0da3e0b2eb1bdfc83f5812f66e09aca2b0978593ed636b6c"}, + {file = "black-22.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:32a4b17f644fc288c6ee2bafdf5e3b045f4eff84693ac069d87b1a347d861497"}, + {file = "black-22.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ad827325a3a634bae88ae7747db1a395d5ee02cf05d9aa7a9bd77dfb10e940c"}, + {file = "black-22.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53198e28a1fb865e9fe97f88220da2e44df6da82b18833b588b1883b16bb5d41"}, + {file = "black-22.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:bc4d4123830a2d190e9cc42a2e43570f82ace35c3aeb26a512a2102bce5af7ec"}, + {file = "black-22.8.0-py3-none-any.whl", hash = "sha256:d2c21d439b2baf7aa80d6dd4e3659259be64c6f49dfd0f32091063db0e006db4"}, + {file = "black-22.8.0.tar.gz", hash = "sha256:792f7eb540ba9a17e8656538701d3eb1afcb134e3b45b71f20b25c77a8db7e6e"}, ] certifi = [ {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, @@ -2036,11 +2177,15 @@ decorator = [ {file = "decorator-5.1.1-py3-none-any.whl", hash = 
"sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] +deepdish = [] dill = [ {file = "dill-0.3.5.1-py2.py3-none-any.whl", hash = "sha256:33501d03270bbe410c72639b350e941882a8b0fd55357580fbc873fba0c59302"}, {file = "dill-0.3.5.1.tar.gz", hash = "sha256:d75e41f3eff1eee599d738e76ba8f4ad98ea229db8b085318aa2b3333a208c86"}, ] -distlib = [] +distlib = [ + {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"}, + {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"}, +] docutils = [ {file = "docutils-0.17.1-py2.py3-none-any.whl", hash = "sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61"}, {file = "docutils-0.17.1.tar.gz", hash = "sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125"}, @@ -2064,75 +2209,80 @@ entrypoints = [ {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"}, {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"}, ] -executing = [] +executing = [ + {file = "executing-1.0.0-py2.py3-none-any.whl", hash = "sha256:550d581b497228b572235e633599133eeee67073c65914ca346100ad56775349"}, + {file = "executing-1.0.0.tar.gz", hash = "sha256:98daefa9d1916a4f0d944880d5aeaf079e05585689bebd9ff9b32e31dd5e1017"}, +] fastjsonschema = [] filelock = [] flake8 = [ - {file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"}, - {file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"}, + {file = "flake8-5.0.4-py2.py3-none-any.whl", hash = "sha256:7a1cf6b73744f5806ab95e526f6f0d8c01c66d7bbe349562d22dfca20610b248"}, + {file = "flake8-5.0.4.tar.gz", hash = "sha256:6fbe320aad8d6b95cec8b8e47bc933004678dc63095be98528b7bdd2a9f510db"}, ] fonttools = [ - {file = "fonttools-4.37.0-py3-none-any.whl", hash = "sha256:e4467a36e24f918507b5cfdc61d2a479b6c7393a85d1f126f3c84f76e8df2151"}, - {file = "fonttools-4.37.0.zip", hash = "sha256:2ee4509aeba40542a6c6d00895a0c66f3cb8b9edda2fa58438dd9f769e3ce76e"}, + {file = "fonttools-4.37.1-py3-none-any.whl", hash = "sha256:fff6b752e326c15756c819fe2fe7ceab69f96a1dbcfe8911d0941cdb49905007"}, + {file = "fonttools-4.37.1.zip", hash = "sha256:4606e1a88ee1f6699d182fea9511bd9a8a915d913eab4584e5226da1180fcce7"}, ] +future = [] graphviz = [] greenlet = [ - {file = "greenlet-1.1.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:58df5c2a0e293bf665a51f8a100d3e9956febfbf1d9aaf8c0677cf70218910c6"}, - {file = "greenlet-1.1.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:aec52725173bd3a7b56fe91bc56eccb26fbdff1386ef123abb63c84c5b43b63a"}, - {file = "greenlet-1.1.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:833e1551925ed51e6b44c800e71e77dacd7e49181fdc9ac9a0bf3714d515785d"}, - {file = "greenlet-1.1.2-cp27-cp27m-win32.whl", hash = "sha256:aa5b467f15e78b82257319aebc78dd2915e4c1436c3c0d1ad6f53e47ba6e2713"}, - {file = "greenlet-1.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:40b951f601af999a8bf2ce8c71e8aaa4e8c6f78ff8afae7b808aae2dc50d4c40"}, - {file = "greenlet-1.1.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:95e69877983ea39b7303570fa6760f81a3eec23d0e3ab2021b7144b94d06202d"}, - {file = 
"greenlet-1.1.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:356b3576ad078c89a6107caa9c50cc14e98e3a6c4874a37c3e0273e4baf33de8"}, - {file = "greenlet-1.1.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:8639cadfda96737427330a094476d4c7a56ac03de7265622fcf4cfe57c8ae18d"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e5306482182170ade15c4b0d8386ded995a07d7cc2ca8f27958d34d6736497"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6a36bb9474218c7a5b27ae476035497a6990e21d04c279884eb10d9b290f1b1"}, - {file = "greenlet-1.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abb7a75ed8b968f3061327c433a0fbd17b729947b400747c334a9c29a9af6c58"}, - {file = "greenlet-1.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b336501a05e13b616ef81ce329c0e09ac5ed8c732d9ba7e3e983fcc1a9e86965"}, - {file = "greenlet-1.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:14d4f3cd4e8b524ae9b8aa567858beed70c392fdec26dbdb0a8a418392e71708"}, - {file = "greenlet-1.1.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:17ff94e7a83aa8671a25bf5b59326ec26da379ace2ebc4411d690d80a7fbcf23"}, - {file = "greenlet-1.1.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9f3cba480d3deb69f6ee2c1825060177a22c7826431458c697df88e6aeb3caee"}, - {file = "greenlet-1.1.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:fa877ca7f6b48054f847b61d6fa7bed5cebb663ebc55e018fda12db09dcc664c"}, - {file = "greenlet-1.1.2-cp35-cp35m-win32.whl", hash = "sha256:7cbd7574ce8e138bda9df4efc6bf2ab8572c9aff640d8ecfece1b006b68da963"}, - {file = "greenlet-1.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:903bbd302a2378f984aef528f76d4c9b1748f318fe1294961c072bdc7f2ffa3e"}, - {file = "greenlet-1.1.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:049fe7579230e44daef03a259faa24511d10ebfa44f69411d99e6a184fe68073"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:dd0b1e9e891f69e7675ba5c92e28b90eaa045f6ab134ffe70b52e948aa175b3c"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:7418b6bfc7fe3331541b84bb2141c9baf1ec7132a7ecd9f375912eca810e714e"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9d29ca8a77117315101425ec7ec2a47a22ccf59f5593378fc4077ac5b754fce"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21915eb821a6b3d9d8eefdaf57d6c345b970ad722f856cd71739493ce003ad08"}, - {file = "greenlet-1.1.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eff9d20417ff9dcb0d25e2defc2574d10b491bf2e693b4e491914738b7908168"}, - {file = "greenlet-1.1.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b8c008de9d0daba7b6666aa5bbfdc23dcd78cafc33997c9b7741ff6353bafb7f"}, - {file = "greenlet-1.1.2-cp36-cp36m-win32.whl", hash = "sha256:32ca72bbc673adbcfecb935bb3fb1b74e663d10a4b241aaa2f5a75fe1d1f90aa"}, - {file = "greenlet-1.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:f0214eb2a23b85528310dad848ad2ac58e735612929c8072f6093f3585fd342d"}, - {file = "greenlet-1.1.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:b92e29e58bef6d9cfd340c72b04d74c4b4e9f70c9fa7c78b674d1fec18896dc4"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:fdcec0b8399108577ec290f55551d926d9a1fa6cad45882093a7a07ac5ec147b"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = 
"sha256:93f81b134a165cc17123626ab8da2e30c0455441d4ab5576eed73a64c025b25c"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e12bdc622676ce47ae9abbf455c189e442afdde8818d9da983085df6312e7a1"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c790abda465726cfb8bb08bd4ca9a5d0a7bd77c7ac1ca1b839ad823b948ea28"}, - {file = "greenlet-1.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f276df9830dba7a333544bd41070e8175762a7ac20350786b322b714b0e654f5"}, - {file = "greenlet-1.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c5d5b35f789a030ebb95bff352f1d27a93d81069f2adb3182d99882e095cefe"}, - {file = "greenlet-1.1.2-cp37-cp37m-win32.whl", hash = "sha256:64e6175c2e53195278d7388c454e0b30997573f3f4bd63697f88d855f7a6a1fc"}, - {file = "greenlet-1.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:b11548073a2213d950c3f671aa88e6f83cda6e2fb97a8b6317b1b5b33d850e06"}, - {file = "greenlet-1.1.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:9633b3034d3d901f0a46b7939f8c4d64427dfba6bbc5a36b1a67364cf148a1b0"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:eb6ea6da4c787111adf40f697b4e58732ee0942b5d3bd8f435277643329ba627"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f3acda1924472472ddd60c29e5b9db0cec629fbe3c5c5accb74d6d6d14773478"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e859fcb4cbe93504ea18008d1df98dee4f7766db66c435e4882ab35cf70cac43"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e44c8afdbe5467e4f7b5851be223be68adb4272f44696ee71fe46b7036a711"}, - {file = "greenlet-1.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec8c433b3ab0419100bd45b47c9c8551248a5aee30ca5e9d399a0b57ac04651b"}, - {file = "greenlet-1.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2bde6792f313f4e918caabc46532aa64aa27a0db05d75b20edfc5c6f46479de2"}, - {file = "greenlet-1.1.2-cp38-cp38-win32.whl", hash = "sha256:288c6a76705dc54fba69fbcb59904ae4ad768b4c768839b8ca5fdadec6dd8cfd"}, - {file = "greenlet-1.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:8d2f1fb53a421b410751887eb4ff21386d119ef9cde3797bf5e7ed49fb51a3b3"}, - {file = "greenlet-1.1.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:166eac03e48784a6a6e0e5f041cfebb1ab400b394db188c48b3a84737f505b67"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:572e1787d1460da79590bf44304abbc0a2da944ea64ec549188fa84d89bba7ab"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:be5f425ff1f5f4b3c1e33ad64ab994eed12fc284a6ea71c5243fd564502ecbe5"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1692f7d6bc45e3200844be0dba153612103db241691088626a33ff1f24a0d88"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7227b47e73dedaa513cdebb98469705ef0d66eb5a1250144468e9c3097d6b59b"}, - {file = "greenlet-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ff61ff178250f9bb3cd89752df0f1dd0e27316a8bd1465351652b1b4a4cdfd3"}, - {file = "greenlet-1.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0051c6f1f27cb756ffc0ffbac7d2cd48cb0362ac1736871399a739b2885134d3"}, - {file = "greenlet-1.1.2-cp39-cp39-win32.whl", hash = 
"sha256:f70a9e237bb792c7cc7e44c531fd48f5897961701cdaa06cf22fc14965c496cf"}, - {file = "greenlet-1.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:013d61294b6cd8fe3242932c1c5e36e5d1db2c8afb58606c5a67efce62c1f5fd"}, - {file = "greenlet-1.1.2.tar.gz", hash = "sha256:e30f5ea4ae2346e62cedde8794a56858a67b878dd79f7df76a0767e356b1744a"}, -] + {file = "greenlet-1.1.3-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:8c287ae7ac921dfde88b1c125bd9590b7ec3c900c2d3db5197f1286e144e712b"}, + {file = "greenlet-1.1.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:870a48007872d12e95a996fca3c03a64290d3ea2e61076aa35d3b253cf34cd32"}, + {file = "greenlet-1.1.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:7c5227963409551ae4a6938beb70d56bf1918c554a287d3da6853526212fbe0a"}, + {file = "greenlet-1.1.3-cp27-cp27m-win32.whl", hash = "sha256:9fae214f6c43cd47f7bef98c56919b9222481e833be2915f6857a1e9e8a15318"}, + {file = "greenlet-1.1.3-cp27-cp27m-win_amd64.whl", hash = "sha256:de431765bd5fe62119e0bc6bc6e7b17ac53017ae1782acf88fcf6b7eae475a49"}, + {file = "greenlet-1.1.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:510c3b15587afce9800198b4b142202b323bf4b4b5f9d6c79cb9a35e5e3c30d2"}, + {file = "greenlet-1.1.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:9951dcbd37850da32b2cb6e391f621c1ee456191c6ae5528af4a34afe357c30e"}, + {file = "greenlet-1.1.3-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:07c58e169bbe1e87b8bbf15a5c1b779a7616df9fd3e61cadc9d691740015b4f8"}, + {file = "greenlet-1.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df02fdec0c533301497acb0bc0f27f479a3a63dcdc3a099ae33a902857f07477"}, + {file = "greenlet-1.1.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c88e134d51d5e82315a7c32b914a58751b7353eb5268dbd02eabf020b4c4700"}, + {file = "greenlet-1.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b41d19c0cfe5c259fe6c539fd75051cd39a5d33d05482f885faf43f7f5e7d26"}, + {file = "greenlet-1.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:6f5d4b2280ceea76c55c893827961ed0a6eadd5a584a7c4e6e6dd7bc10dfdd96"}, + {file = "greenlet-1.1.3-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:184416e481295832350a4bf731ba619a92f5689bf5d0fa4341e98b98b1265bd7"}, + {file = "greenlet-1.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd0404d154084a371e6d2bafc787201612a1359c2dee688ae334f9118aa0bf47"}, + {file = "greenlet-1.1.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a43bbfa9b6cfdfaeefbd91038dde65ea2c421dc387ed171613df340650874f2"}, + {file = "greenlet-1.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce5b64dfe8d0cca407d88b0ee619d80d4215a2612c1af8c98a92180e7109f4b5"}, + {file = "greenlet-1.1.3-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:903fa5716b8fbb21019268b44f73f3748c41d1a30d71b4a49c84b642c2fed5fa"}, + {file = "greenlet-1.1.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:0118817c9341ef2b0f75f5af79ac377e4da6ff637e5ee4ac91802c0e379dadb4"}, + {file = "greenlet-1.1.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:466ce0928e33421ee84ae04c4ac6f253a3a3e6b8d600a79bd43fd4403e0a7a76"}, + {file = "greenlet-1.1.3-cp35-cp35m-win32.whl", hash = "sha256:65ad1a7a463a2a6f863661329a944a5802c7129f7ad33583dcc11069c17e622c"}, + {file = "greenlet-1.1.3-cp35-cp35m-win_amd64.whl", hash = "sha256:7532a46505470be30cbf1dbadb20379fb481244f1ca54207d7df3bf0bbab6a20"}, + {file = 
"greenlet-1.1.3-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:caff52cb5cd7626872d9696aee5b794abe172804beb7db52eed1fd5824b63910"}, + {file = "greenlet-1.1.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:db41f3845eb579b544c962864cce2c2a0257fe30f0f1e18e51b1e8cbb4e0ac6d"}, + {file = "greenlet-1.1.3-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:e8533f5111704d75de3139bf0b8136d3a6c1642c55c067866fa0a51c2155ee33"}, + {file = "greenlet-1.1.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537e4baf0db67f382eb29255a03154fcd4984638303ff9baaa738b10371fa57"}, + {file = "greenlet-1.1.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8bfd36f368efe0ab2a6aa3db7f14598aac454b06849fb633b762ddbede1db90"}, + {file = "greenlet-1.1.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0877a9a2129a2c56a2eae2da016743db7d9d6a05d5e1c198f1b7808c602a30e"}, + {file = "greenlet-1.1.3-cp36-cp36m-win32.whl", hash = "sha256:88b04e12c9b041a1e0bcb886fec709c488192638a9a7a3677513ac6ba81d8e79"}, + {file = "greenlet-1.1.3-cp36-cp36m-win_amd64.whl", hash = "sha256:4f166b4aca8d7d489e82d74627a7069ab34211ef5ebb57c300ec4b9337b60fc0"}, + {file = "greenlet-1.1.3-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:cd16a89efe3a003029c87ff19e9fba635864e064da646bc749fc1908a4af18f3"}, + {file = "greenlet-1.1.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5b756e6730ea59b2745072e28ad27f4c837084688e6a6b3633c8b1e509e6ae0e"}, + {file = "greenlet-1.1.3-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:9b2f7d0408ddeb8ea1fd43d3db79a8cefaccadd2a812f021333b338ed6b10aba"}, + {file = "greenlet-1.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44b4817c34c9272c65550b788913620f1fdc80362b209bc9d7dd2f40d8793080"}, + {file = "greenlet-1.1.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d58a5a71c4c37354f9e0c24c9c8321f0185f6945ef027460b809f4bb474bfe41"}, + {file = "greenlet-1.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1dd51d2650e70c6c4af37f454737bf4a11e568945b27f74b471e8e2a9fd21268"}, + {file = "greenlet-1.1.3-cp37-cp37m-win32.whl", hash = "sha256:048d2bed76c2aa6de7af500ae0ea51dd2267aec0e0f2a436981159053d0bc7cc"}, + {file = "greenlet-1.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:77e41db75f9958f2083e03e9dd39da12247b3430c92267df3af77c83d8ff9eed"}, + {file = "greenlet-1.1.3-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:1626185d938d7381631e48e6f7713e8d4b964be246073e1a1d15c2f061ac9f08"}, + {file = "greenlet-1.1.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1ec2779774d8e42ed0440cf8bc55540175187e8e934f2be25199bf4ed948cd9e"}, + {file = "greenlet-1.1.3-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:f2f908239b7098799b8845e5936c2ccb91d8c2323be02e82f8dcb4a80dcf4a25"}, + {file = "greenlet-1.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b181e9aa6cb2f5ec0cacc8cee6e5a3093416c841ba32c185c30c160487f0380"}, + {file = "greenlet-1.1.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2cf45e339cabea16c07586306a31cfcc5a3b5e1626d365714d283732afed6809"}, + {file = "greenlet-1.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6200a11f003ec26815f7e3d2ded01b43a3810be3528dd760d2f1fa777490c3cd"}, + {file = "greenlet-1.1.3-cp38-cp38-win32.whl", hash = "sha256:db5b25265010a1b3dca6a174a443a0ed4c4ab12d5e2883a11c97d6e6d59b12f9"}, + {file = "greenlet-1.1.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:095a980288fe05adf3d002fbb180c99bdcf0f930e220aa66fcd56e7914a38202"}, + {file = "greenlet-1.1.3-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:cbc1eb55342cbac8f7ec159088d54e2cfdd5ddf61c87b8bbe682d113789331b2"}, + {file = "greenlet-1.1.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:694ffa7144fa5cc526c8f4512665003a39fa09ef00d19bbca5c8d3406db72fbe"}, + {file = "greenlet-1.1.3-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:aa741c1a8a8cc25eb3a3a01a62bdb5095a773d8c6a86470bde7f607a447e7905"}, + {file = "greenlet-1.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3a669f11289a8995d24fbfc0e63f8289dd03c9aaa0cc8f1eab31d18ca61a382"}, + {file = "greenlet-1.1.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76a53bfa10b367ee734b95988bd82a9a5f0038a25030f9f23bbbc005010ca600"}, + {file = "greenlet-1.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb0aa7f6996879551fd67461d5d3ab0c3c0245da98be90c89fcb7a18d437403"}, + {file = "greenlet-1.1.3-cp39-cp39-win32.whl", hash = "sha256:5fbe1ab72b998ca77ceabbae63a9b2e2dc2d963f4299b9b278252ddba142d3f1"}, + {file = "greenlet-1.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:ffe73f9e7aea404722058405ff24041e59d31ca23d1da0895af48050a07b6932"}, + {file = "greenlet-1.1.3.tar.gz", hash = "sha256:bcb6c6dd1d6be6d38d6db283747d07fda089ff8c559a835236560a4410340455"}, +] +h5py = [] +h5py-cache = [] identify = [] idna = [ {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, @@ -2151,11 +2301,15 @@ iniconfig = [ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] -ipykernel = [] +ipykernel = [ + {file = "ipykernel-6.15.2-py3-none-any.whl", hash = "sha256:59183ef833b82c72211aace3fb48fd20eae8e2d0cae475f3d5c39d4a688e81ec"}, + {file = "ipykernel-6.15.2.tar.gz", hash = "sha256:e7481083b438609c9c8a22d6362e8e1bc6ec94ba0741b666941e634f2d61bdf3"}, +] ipython = [ {file = "ipython-8.4.0-py3-none-any.whl", hash = "sha256:7ca74052a38fa25fe9bedf52da0be7d3fdd2fb027c3b778ea78dfe8c212937d1"}, {file = "ipython-8.4.0.tar.gz", hash = "sha256:f2db3a10254241d9b447232cec8b424847f338d9d36f9a577a6192c332a46abd"}, ] +iso8601 = [] isort = [ {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"}, {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"}, @@ -2174,16 +2328,16 @@ joblib = [ ] jsonpickle = [] jsonschema = [ - {file = "jsonschema-4.14.0-py3-none-any.whl", hash = "sha256:9892b8d630a82990521a9ca630d3446bd316b5ad54dbe981338802787f3e0d2d"}, - {file = "jsonschema-4.14.0.tar.gz", hash = "sha256:15062f4cc6f591400cd528d2c355f2cfa6a57e44c820dc783aee5e23d36a831f"}, + {file = "jsonschema-4.15.0-py3-none-any.whl", hash = "sha256:2df0fab225abb3b41967bb3a46fd37dc74b1536b5296d0b1c2078cd072adf0f7"}, + {file = "jsonschema-4.15.0.tar.gz", hash = "sha256:21f4979391bdceb044e502fd8e79e738c0cdfbdc8773f9a49b5769461e82fe1e"}, ] jupyter-cache = [ {file = "jupyter-cache-0.5.0.tar.gz", hash = "sha256:87408030a4c8c14fe3f8fe62e6ceeb24c84e544c7ced20bfee45968053d07801"}, {file = "jupyter_cache-0.5.0-py3-none-any.whl", hash = "sha256:642e434b9b75c4b94dc8346eaf5a639c8926a0673b87e5e8ef6460d5cf2c9516"}, ] jupyter-client = [ - {file = 
"jupyter_client-7.3.4-py3-none-any.whl", hash = "sha256:17d74b0d0a7b24f1c8c527b24fcf4607c56bee542ffe8e3418e50b21e514b621"}, - {file = "jupyter_client-7.3.4.tar.gz", hash = "sha256:aa9a6c32054b290374f95f73bb0cae91455c58dfb84f65c8591912b8f65e6d56"}, + {file = "jupyter_client-7.3.5-py3-none-any.whl", hash = "sha256:b33222bdc9dd1714228bd286af006533a0abe2bbc093e8f3d29dc0b91bdc2be4"}, + {file = "jupyter_client-7.3.5.tar.gz", hash = "sha256:3c58466a1b8d55dba0bf3ce0834e4f5b7760baf98d1d73db0add6f19de9ecd1d"}, ] jupyter-core = [] kiwisolver = [ @@ -2270,6 +2424,7 @@ lazy-object-proxy = [ {file = "lazy_object_proxy-1.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:677ea950bef409b47e51e733283544ac3d660b709cfce7b187f5ace137960d61"}, {file = "lazy_object_proxy-1.7.1-pp37.pp38-none-any.whl", hash = "sha256:d66906d5785da8e0be7360912e99c9188b70f52c422f9fc18223347235691a84"}, ] +lxml = [] lyon = [] markdown-it-py = [ {file = "markdown-it-py-2.1.0.tar.gz", hash = "sha256:cf7e59fed14b5ae17c0006eff14a2d9a00ed5f3a846148153899a0224e2c07da"}, @@ -2320,8 +2475,8 @@ markupsafe = [ matplotlib = [] matplotlib-inline = [] mccabe = [ - {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, - {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, ] mdit-py-plugins = [ {file = "mdit-py-plugins-0.3.0.tar.gz", hash = "sha256:ecc24f51eeec6ab7eecc2f9724e8272c2fb191c2e93cf98109120c2cace69750"}, @@ -2392,45 +2547,47 @@ nodeenv = [ {file = "nodeenv-1.7.0-py2.py3-none-any.whl", hash = "sha256:27083a7b96a25f2f5e1d8cb4b6317ee8aeda3bdd121394e5ac54e498028a042e"}, {file = "nodeenv-1.7.0.tar.gz", hash = "sha256:e0e7f7dfb85fc5394c6fe1e8fa98131a2473e04311a45afb6508f7cf1836fa2b"}, ] +numexpr = [] numpy = [] numpydoc = [ {file = "numpydoc-1.4.0-py3-none-any.whl", hash = "sha256:fd26258868ebcc75c816fe68e1d41e3b55bd410941acfb969dee3eef6e5cf260"}, {file = "numpydoc-1.4.0.tar.gz", hash = "sha256:9494daf1c7612f59905fa09e65c9b8a90bbacb3804d91f7a94e778831e6fcfa5"}, ] +obspy = [] packaging = [ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] pandas = [ - {file = "pandas-1.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d51674ed8e2551ef7773820ef5dab9322be0828629f2cbf8d1fc31a0c4fed640"}, - {file = "pandas-1.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:16ad23db55efcc93fa878f7837267973b61ea85d244fc5ff0ccbcfa5638706c5"}, - {file = "pandas-1.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:958a0588149190c22cdebbc0797e01972950c927a11a900fe6c2296f207b1d6f"}, - {file = "pandas-1.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e48fbb64165cda451c06a0f9e4c7a16b534fcabd32546d531b3c240ce2844112"}, - {file = "pandas-1.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f803320c9da732cc79210d7e8cc5c8019aad512589c910c66529eb1b1818230"}, - {file = "pandas-1.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:2893e923472a5e090c2d5e8db83e8f907364ec048572084c7d10ef93546be6d1"}, - {file = 
"pandas-1.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:24ea75f47bbd5574675dae21d51779a4948715416413b30614c1e8b480909f81"}, - {file = "pandas-1.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d5ebc990bd34f4ac3c73a2724c2dcc9ee7bf1ce6cf08e87bb25c6ad33507e318"}, - {file = "pandas-1.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d6c0106415ff1a10c326c49bc5dd9ea8b9897a6ca0c8688eb9c30ddec49535ef"}, - {file = "pandas-1.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78b00429161ccb0da252229bcda8010b445c4bf924e721265bec5a6e96a92e92"}, - {file = "pandas-1.4.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dfbf16b1ea4f4d0ee11084d9c026340514d1d30270eaa82a9f1297b6c8ecbf0"}, - {file = "pandas-1.4.3-cp38-cp38-win32.whl", hash = "sha256:48350592665ea3cbcd07efc8c12ff12d89be09cd47231c7925e3b8afada9d50d"}, - {file = "pandas-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:605d572126eb4ab2eadf5c59d5d69f0608df2bf7bcad5c5880a47a20a0699e3e"}, - {file = "pandas-1.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a3924692160e3d847e18702bb048dc38e0e13411d2b503fecb1adf0fcf950ba4"}, - {file = "pandas-1.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:07238a58d7cbc8a004855ade7b75bbd22c0db4b0ffccc721556bab8a095515f6"}, - {file = "pandas-1.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:755679c49460bd0d2f837ab99f0a26948e68fa0718b7e42afbabd074d945bf84"}, - {file = "pandas-1.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41fc406e374590a3d492325b889a2686b31e7a7780bec83db2512988550dadbf"}, - {file = "pandas-1.4.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d9382f72a4f0e93909feece6fef5500e838ce1c355a581b3d8f259839f2ea76"}, - {file = "pandas-1.4.3-cp39-cp39-win32.whl", hash = "sha256:0daf876dba6c622154b2e6741f29e87161f844e64f84801554f879d27ba63c0d"}, - {file = "pandas-1.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:721a3dd2f06ef942f83a819c0f3f6a648b2830b191a72bbe9451bcd49c3bd42e"}, - {file = "pandas-1.4.3.tar.gz", hash = "sha256:2ff7788468e75917574f080cd4681b27e1a7bf36461fe968b49a87b5a54d007c"}, + {file = "pandas-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:799e6a25932df7e6b1f8dabf63de064e2205dc309abb75956126a0453fd88e97"}, + {file = "pandas-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7cd1d69a387f7d5e1a5a06a87574d9ef2433847c0e78113ab51c84d3a8bcaeaa"}, + {file = "pandas-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:94f2ed1fd51e545ebf71da1e942fe1822ee01e10d3dd2a7276d01351333b7c6b"}, + {file = "pandas-1.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4591cadd06fbbbd16fafc2de6e840c1aaefeae3d5864b688004777ef1bbdede3"}, + {file = "pandas-1.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0022fe6a313df1c4869b5edc012d734c6519a6fffa3cf70930f32e6a1078e49"}, + {file = "pandas-1.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:785e878a6e6d8ddcdb8c181e600855402750052497d7fc6d6b508894f6b8830b"}, + {file = "pandas-1.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c4bb8b0ab9f94207d07e401d24baebfc63057246b1a5e0cd9ee50df85a656871"}, + {file = "pandas-1.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:51c424ca134fdaeac9a4acd719d1ab48046afc60943a489028f0413fdbe9ef1c"}, + {file = "pandas-1.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ce35f947202b0b99c660221d82beb91d2e6d553d55a40b30128204e3e2c63848"}, + {file = 
"pandas-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee6f1848148ed3204235967613b0a32be2d77f214e9623f554511047705c1e04"}, + {file = "pandas-1.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7cc960959be28d064faefc0cb2aef854d46b827c004ebea7e79b5497ed83e7d"}, + {file = "pandas-1.4.4-cp38-cp38-win32.whl", hash = "sha256:9d805bce209714b1c1fa29bfb1e42ad87e4c0a825e4b390c56a3e71593b7e8d8"}, + {file = "pandas-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:afbddad78a98ec4d2ce08b384b81730de1ccc975b99eb663e6dac43703f36d98"}, + {file = "pandas-1.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a08ceb59db499864c58a9bf85ab6219d527d91f14c0240cc25fa2c261032b2a7"}, + {file = "pandas-1.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0959c41004e3d2d16f39c828d6da66ebee329836a7ecee49fb777ac9ad8a7501"}, + {file = "pandas-1.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87b4194f344dcd14c0f885cecb22005329b38bda10f1aaf7b9596a00ec8a4768"}, + {file = "pandas-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d2a7a3c1fea668d56bd91edbd5f2732e0af8feb9d2bf8d9bfacb2dea5fa9536"}, + {file = "pandas-1.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a981cfabf51c318a562deb4ae7deec594c07aee7cf18b4594a92c23718ec8275"}, + {file = "pandas-1.4.4-cp39-cp39-win32.whl", hash = "sha256:050aada67a5ec6699a7879e769825b510018a95fb9ac462bb1867483d0974a97"}, + {file = "pandas-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:8d4d2fe2863ecddb0ba1979bdda26c8bc2ea138f5a979abe3ba80c0fa4015c91"}, + {file = "pandas-1.4.4.tar.gz", hash = "sha256:ab6c0d738617b675183e5f28db32b5148b694ad9bba0a40c3ea26d96b431db67"}, ] parso = [ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, ] pathspec = [ - {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, - {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, + {file = "pathspec-0.10.1-py3-none-any.whl", hash = "sha256:46846318467efc4556ccfd27816e004270a9eeeeb4d062ce5e6fc7a87c573f93"}, + {file = "pathspec-0.10.1.tar.gz", hash = "sha256:7ace6161b621d31e7902eb6b5ae148d12cfd23f4a249b9ffb6b9fee12084323d"}, ] pexpect = [ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, @@ -2495,8 +2652,8 @@ poetryup = [ ] pre-commit = [] prompt-toolkit = [ - {file = "prompt_toolkit-3.0.30-py3-none-any.whl", hash = "sha256:d8916d3f62a7b67ab353a952ce4ced6a1d2587dfe9ef8ebc30dd7c386751f289"}, - {file = "prompt_toolkit-3.0.30.tar.gz", hash = "sha256:859b283c50bde45f5f97829f77a4674d1c1fcd88539364f1b28a37805cfd89c0"}, + {file = "prompt_toolkit-3.0.31-py3-none-any.whl", hash = "sha256:9696f386133df0fc8ca5af4895afe5d78f5fcfe5258111c2a79a1c3e41ffa96d"}, + {file = "prompt_toolkit-3.0.31.tar.gz", hash = "sha256:9ada952c9d1787f52ff6d5f3484d0b4df8952787c087edf6a1f7c2cb1ea88148"}, ] psutil = [ {file = "psutil-5.9.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:799759d809c31aab5fe4579e50addf84565e71c1dc9f1c31258f159ff70d3f87"}, @@ -2545,24 +2702,22 @@ py = [ {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, ] pycodestyle = [ - {file = 
"pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"}, - {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"}, + {file = "pycodestyle-2.9.1-py2.py3-none-any.whl", hash = "sha256:d1735fc58b418fd7c5f658d28d943854f8a849b01a5d0a1e6f3f3fdd0166804b"}, + {file = "pycodestyle-2.9.1.tar.gz", hash = "sha256:2c9607871d58c76354b697b42f5d57e1ada7d261c261efac224b664affdc5785"}, ] pycparser = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, ] -pydata-sphinx-theme = [ - {file = "pydata_sphinx_theme-0.8.1-py3-none-any.whl", hash = "sha256:af2c99cb0b43d95247b1563860942ba75d7f1596360594fce510caaf8c4fcc16"}, - {file = "pydata_sphinx_theme-0.8.1.tar.gz", hash = "sha256:96165702253917ece13dd895e23b96ee6dce422dcc144d560806067852fe1fed"}, -] +pydata-sphinx-theme = [] pydocstyle = [ {file = "pydocstyle-6.1.1-py3-none-any.whl", hash = "sha256:6987826d6775056839940041beef5c08cc7e3d71d63149b48e36727f70144dc4"}, {file = "pydocstyle-6.1.1.tar.gz", hash = "sha256:1d41b7c459ba0ee6c345f2eb9ae827cab14a7533a88c5c6f7e94923f72df92dc"}, ] +pyedflib = [] pyflakes = [ - {file = "pyflakes-2.4.0-py2.py3-none-any.whl", hash = "sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"}, - {file = "pyflakes-2.4.0.tar.gz", hash = "sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c"}, + {file = "pyflakes-2.5.0-py2.py3-none-any.whl", hash = "sha256:4579f67d887f804e67edb544428f264b7b24f435b263c4614f384135cea553d2"}, + {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"}, ] pygments = [ {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"}, @@ -2573,7 +2728,10 @@ pyinform = [ {file = "pyinform-0.2.0-py3-none-any.whl", hash = "sha256:03b9ae465d88038542e2570d5220ff9e7f21982c7000afa7276dc8c80d854fcd"}, {file = "pyinform-0.2.0.tar.gz", hash = "sha256:a29075e062acc64cbafd33431dda5357c6f03dfc4a000c57f322516d418299bb"}, ] -pylint = [] +pylint = [ + {file = "pylint-2.15.0-py3-none-any.whl", hash = "sha256:4b124affc198b7f7c9b5f9ab690d85db48282a025ef9333f51d2d7281b92a6c3"}, + {file = "pylint-2.15.0.tar.gz", hash = "sha256:4f3f7e869646b0bd63b3dfb79f3c0f28fc3d2d923ea220d52620fd625aed92b0"}, +] pyparsing = [ {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, @@ -2601,9 +2759,10 @@ pyrsistent = [ {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"}, {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"}, ] +pyseries = [] pytest = [ - {file = "pytest-7.1.2-py3-none-any.whl", hash = "sha256:13d0e3ccfc2b6e26be000cb6568c832ba67ba32e719443bfe725814d3c42433c"}, - {file = "pytest-7.1.2.tar.gz", hash = "sha256:a06a0425453864a270bc45e71f783330a7428defb4230fb5e6a731fde06ecd45"}, + {file = "pytest-7.1.3-py3-none-any.whl", hash = "sha256:1377bda3466d70b55e3f5cecfa55bb7cfcf219c7964629b967c37cf0bda818b7"}, + {file = "pytest-7.1.3.tar.gz", 
hash = "sha256:4f365fec2dff9c1162f834d9f18af1ba13062db0c708bf7b946f8a5c76180c39"}, ] pytest-cov = [ {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, @@ -2720,11 +2879,36 @@ requests = [ {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"}, ] scikit-learn = [] -scipy = [] +scipy = [ + {file = "scipy-1.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c61b4a91a702e8e04aeb0bfc40460e1f17a640977c04dda8757efb0199c75332"}, + {file = "scipy-1.9.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d79da472015d0120ba9b357b28a99146cd6c17b9609403164b1a8ed149b4dfc8"}, + {file = "scipy-1.9.1-cp310-cp310-macosx_12_0_universal2.macosx_10_9_x86_64.whl", hash = "sha256:825951b88f56765aeb6e5e38ac9d7d47407cfaaeb008d40aa1b45a2d7ea2731e"}, + {file = "scipy-1.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f950a04b33e17b38ff561d5a0951caf3f5b47caa841edd772ffb7959f20a6af0"}, + {file = "scipy-1.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cc81ac25659fec73599ccc52c989670e5ccd8974cf34bacd7b54a8d809aff1a"}, + {file = "scipy-1.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:8d3faa40ac16c6357aaf7ea50394ea6f1e8e99d75e927a51102b1943b311b4d9"}, + {file = "scipy-1.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a412c476a91b080e456229e413792bbb5d6202865dae963d1e6e28c2bb58691"}, + {file = "scipy-1.9.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:eb954f5aca4d26f468bbebcdc5448348eb287f7bea536c6306f62ea062f63d9a"}, + {file = "scipy-1.9.1-cp38-cp38-macosx_12_0_universal2.macosx_10_9_x86_64.whl", hash = "sha256:3c6f5d1d4b9a5e4fe5e14f26ffc9444fc59473bbf8d45dc4a9a15283b7063a72"}, + {file = "scipy-1.9.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bc4e2c77d4cd015d739e75e74ebbafed59ba8497a7ed0fd400231ed7683497c4"}, + {file = "scipy-1.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0419485dbcd0ed78c0d5bf234c5dd63e86065b39b4d669e45810d42199d49521"}, + {file = "scipy-1.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34441dfbee5b002f9e15285014fd56e5e3372493c3e64ae297bae2c4b9659f5a"}, + {file = "scipy-1.9.1-cp38-cp38-win32.whl", hash = "sha256:b97b479f39c7e4aaf807efd0424dec74bbb379108f7d22cf09323086afcd312c"}, + {file = "scipy-1.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:e8fe305d9d67a81255e06203454729405706907dccbdfcc330b7b3482a6c371d"}, + {file = "scipy-1.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:39ab9240cd215a9349c85ab908dda6d732f7d3b4b192fa05780812495536acc4"}, + {file = "scipy-1.9.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:71487c503e036740635f18324f62a11f283a632ace9d35933b2b0a04fd898c98"}, + {file = "scipy-1.9.1-cp39-cp39-macosx_12_0_universal2.macosx_10_9_x86_64.whl", hash = "sha256:3bc1ab68b9a096f368ba06c3a5e1d1d50957a86665fc929c4332d21355e7e8f4"}, + {file = "scipy-1.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f7c39f7dbb57cce00c108d06d731f3b0e2a4d3a95c66d96bce697684876ce4d4"}, + {file = "scipy-1.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47d1a95bd9d37302afcfe1b84c8011377c4f81e33649c5a5785db9ab827a6ade"}, + {file = "scipy-1.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96d7cf7b25c9f23c59a766385f6370dab0659741699ecc7a451f9b94604938ce"}, + {file = "scipy-1.9.1-cp39-cp39-win32.whl", hash = 
"sha256:09412eb7fb60b8f00b328037fd814d25d261066ebc43a1e339cdce4f7502877e"}, + {file = "scipy-1.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:90c805f30c46cf60f1e76e947574f02954d25e3bb1e97aa8a07bc53aa31cf7d1"}, + {file = "scipy-1.9.1.tar.gz", hash = "sha256:26d28c468900e6d5fdb37d2812ab46db0ccd22c63baa095057871faa3a498bc9"}, +] seaborn = [ {file = "seaborn-0.11.2-py3-none-any.whl", hash = "sha256:85a6baa9b55f81a0623abddc4a26b334653ff4c6b18c418361de19dbba0ef283"}, {file = "seaborn-0.11.2.tar.gz", hash = "sha256:cf45e9286d40826864be0e3c066f98536982baf701a7caa386511792d61ff4f6"}, ] +serial = [] setuptools-scm = [ {file = "setuptools_scm-6.4.2-py3-none-any.whl", hash = "sha256:acea13255093849de7ccb11af9e1fb8bde7067783450cee9ef7a93139bddf6d4"}, {file = "setuptools_scm-6.4.2.tar.gz", hash = "sha256:6833ac65c6ed9711a4d5d2266f8024cfa07c533a0e55f4c12f6eff280a5a9e30"}, @@ -2733,6 +2917,7 @@ six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] +sklearn = [] snowballstemmer = [ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, @@ -2746,18 +2931,10 @@ sphinx = [ {file = "Sphinx-4.5.0.tar.gz", hash = "sha256:7bf8ca9637a4ee15af412d1a1d9689fec70523a68ca9bb9127c2f3eeb344e2e6"}, ] sphinx-autodoc-typehints = [] -sphinx-book-theme = [ - {file = "sphinx_book_theme-0.3.3-py3-none-any.whl", hash = "sha256:9685959dbbb492af005165ef1b9229fdd5d5431580ac181578beae3b4d012d91"}, - {file = "sphinx_book_theme-0.3.3.tar.gz", hash = "sha256:0ec36208ff14c6d6bf8aee1f1f8268e0c6e2bfa3cef6e41143312b25275a6217"}, -] sphinx-copybutton = [ {file = "sphinx-copybutton-0.5.0.tar.gz", hash = "sha256:a0c059daadd03c27ba750da534a92a63e7a36a7736dcf684f26ee346199787f6"}, {file = "sphinx_copybutton-0.5.0-py3-none-any.whl", hash = "sha256:9684dec7434bd73f0eea58dda93f9bb879d24bff2d8b187b1f2ec08dfe7b5f48"}, ] -sphinx-rtd-theme = [ - {file = "sphinx_rtd_theme-1.0.0-py2.py3-none-any.whl", hash = "sha256:4d35a56f4508cfee4c4fb604373ede6feae2a306731d533f409ef5c3496fdbd8"}, - {file = "sphinx_rtd_theme-1.0.0.tar.gz", hash = "sha256:eec6d497e4c2195fa0e8b2016b337532b8a699a68bcb22a512870e16925c6a5c"}, -] sphinx-togglebutton = [] sphinxcontrib-applehelp = [ {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"}, @@ -2775,6 +2952,10 @@ sphinxcontrib-jsmath = [ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, ] +sphinxcontrib-mermaid = [ + {file = "sphinxcontrib-mermaid-0.7.1.tar.gz", hash = "sha256:aa8a40b50ec86ad12824b62180240ca52a9bda8424455d7eb252eae9aa5d293c"}, + {file = "sphinxcontrib_mermaid-0.7.1-py2.py3-none-any.whl", hash = "sha256:3e20de1937c30dfa807e446bf99983d73d0dd3dc5c6524addda59800fe928762"}, +] sphinxcontrib-qthelp = [ {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = 
"sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, @@ -2788,7 +2969,11 @@ sphinxcontrib-websupport = [ {file = "sphinxcontrib_websupport-1.2.4-py2.py3-none-any.whl", hash = "sha256:6fc9287dfc823fe9aa432463edd6cea47fa9ebbf488d7f289b322ffcfca075c7"}, ] sqlalchemy = [] -stack-data = [] +stack-data = [ + {file = "stack_data-0.5.0-py3-none-any.whl", hash = "sha256:66d2ebd3d7f29047612ead465b6cae5371006a71f45037c7e2507d01367bce3b"}, + {file = "stack_data-0.5.0.tar.gz", hash = "sha256:715c8855fbf5c43587b141e46cc9d9339cc0d1f8d6e0f98ed0d01c6cb974e29f"}, +] +tables = [] tabulate = [ {file = "tabulate-0.8.10-py3-none-any.whl", hash = "sha256:0ba055423dbaa164b9e456abe7920c5e8ed33fcc16f6d1b2f2d152c8e1e8b4fc"}, {file = "tabulate-0.8.10-py3.8.egg", hash = "sha256:436f1c768b424654fce8597290d2764def1eea6a77cfa5c33be00b1bc0f4f63d"}, @@ -2829,7 +3014,10 @@ urllib3 = [ {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"}, {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"}, ] -virtualenv = [] +virtualenv = [ + {file = "virtualenv-20.16.4-py3-none-any.whl", hash = "sha256:035ed57acce4ac35c82c9d8802202b0e71adac011a511ff650cbcf9635006a22"}, + {file = "virtualenv-20.16.4.tar.gz", hash = "sha256:014f766e4134d0008dcaa1f95bafa0fb0f575795d07cae50b1bee514185d6782"}, +] viziphant = [ {file = "viziphant-0.2.0.tar.gz", hash = "sha256:044b5c92de169dfafd9665efe2c310e917d2c21980bcc9f560d5c727161f9bd8"}, ] diff --git a/pyproject.toml b/pyproject.toml index 161f2528..7d946eb2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "MiV-OS" -version = "0.2.0" +version = "0.2.1" description = "Python software for analysis and computing framework used in MiV project." 
readme = "README.md" authors = ["GazzolaLab "] @@ -35,13 +35,17 @@ packages = [ ] +[tool.poetry.scripts] # TODO: query `scripts` directory +convert_open_ephys_to_miv = "scripts.convert_open_ephys_to_miv:main" + + [tool.poetry.dependencies] python = ">=3.8,<3.11" -scipy = "^1.9.0" +scipy = "^1.9.1" elephant = "^0.11.1" matplotlib = "^3.5.3" neo = "^0.10.2" -pandas = "^1.4.3" +pandas = "^1.4.4" PyWavelets = "^1.3.0" quantities = "^0.13.0" scikit-learn = "^1.1.2" @@ -50,8 +54,7 @@ tqdm = "^4.64.0" numpy = "^1.23.2" viziphant = "^0.2.0" Sphinx = {version = "^4.5.0", optional = true, extras = ["docs"]} -sphinx-rtd-theme = {version = "^1.0.0", optional = true, extras = ["docs"]} -sphinx-book-theme = {version = "^0.3.3", optional = true, extras = ["docs"]} +pydata-sphinx-theme = {version = "^0.9.0", optional = true, extras = ["docs"]} readthedocs-sphinx-search = {version = "^0.1.2", optional = true, extras = ["docs"]} sphinx-autodoc-typehints = {version = "^1.19.1", optional = true, extras = ["docs"]} myst-parser = {version = "^0.17.2", optional = true, extras = ["docs"]} @@ -62,37 +65,47 @@ myst-nb = {version = "^0.15.0", optional = true, extras = ["docs"]} pyinform = "^0.2.0" graphviz = "^0.20.1" Pillow = "9.1.1" +h5py = "^3.7.0" +h5py-cache = "^1.0" pyvis = "^0.2.1" -lyon = { git = "https://github.com/sciforce/lyon.git", rev = "91dd700" } +lyon = { git = "https://github.com/sciforce/lyon.git", optional = true, extras = ["experiment"], rev = "91dd700" } +pyseries = {extras = ["experiment", "docs"], version = "^1.0.26"} +serial = "^0.0.97" +click = "^8.1.3" +sphinxcontrib-mermaid = {version = "^0.7.1", extras = ["docs"]} poetryup = "^0.10.0" [tool.poetry.dev-dependencies] -black = "^22.6.0" +black = "^22.8.0" isort = {extras = ["colors"], version = "^5.10.1"} mypy = "^0.950" mypy-extensions = "^0.4.3" pre-commit = "^2.20.0" pydocstyle = "^6.1.1" -pylint = "^2.14.5" -pytest = "^7.1.2" +pylint = "^2.15.0" +pytest = "^7.1.3" pyupgrade = "^2.37.3" coverage = "^6.4.4" pytest-html = "^3.1.1" pytest-cov = "^3.0.0" -flake8 = "^4.0.1" +flake8 = "^5.0.4" [tool.poetry.extras] docs = [ "sphinx", - "sphinx_rtd_theme", - "sphinx-book-theme", + "pydata-sphinx-theme", "readthedocs-sphinx-search", "sphinx-autodoc-typehints", "myst-parser", "myst-nb", "numpydoc", "sphinx-togglebutton", - "sphinx-copybutton" + "sphinx-copybutton", + "sphinxcontrib-mermaid" +] +experiment = [ + "lyon", + "pyseries" ] diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 00000000..9e0f14fa --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,3 @@ +# Sciprts for MiV + +Collection of standalone executable scripts. Make sure the script is registered in `pyproject.toml`. diff --git a/scripts/convert_open_ephys_to_miv.py b/scripts/convert_open_ephys_to_miv.py new file mode 100644 index 00000000..f38939b5 --- /dev/null +++ b/scripts/convert_open_ephys_to_miv.py @@ -0,0 +1,90 @@ +# A script to convert Open Ephys data to MiV HDF5 format. + +import logging +import os +import sys + +import click +import numpy as np + +from miv.io import Data, DataManager +from miv.io import file as miv_file + + +def config_logging(verbose): + if verbose: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.WARN) + + +script_name = os.path.basename(__file__) + + +def seq_find(f, seq): + """ + Determines if an element satisfying predicate is present in the + given sequence, returns index of element or None. 
+ + :param f: predicate function + :param seq: sequence + :return: index of element satisfying predicate, or None + + """ + i = 0 + for x in seq: + if f(x): + return i + else: + i = i + 1 + return None + + +@click.command() +@click.option("--folder-path", "-p", required=True, type=click.Path()) +@click.option("--verbose", "-v", type=bool, default=False, is_flag=True) +def main(folder_path: str, verbose: bool): + """Convert Open Ephys data to MiV HDF5 format""" + + config_logging(verbose) + logger = logging.getLogger("miv_file") + + # Load dataset from OpenEphys recording + data_manager = DataManager(folder_path) + miv_data = miv_file.initialize() + cont = miv_file.create_container(miv_data) + + # Get signal and rate(hz) + # signal : np.array, shape(N, N_channels) + # timestamps : np.array + # sampling_rate : float + for recording in data_manager: + data_path = recording.data_path + with recording.load() as (signal, timestamps, sampling_rate): + group_id = miv_file.create_group(miv_data, data_path, counter="nrecording") + miv_file.create_dataset( + miv_data, + ["signal", "timestamps", "sampling_rate"], + group=data_path, + dtype=float, + ) + cont[f"{group_id}/signal"] = signal + cont[f"{group_id}/timestamps"] = timestamps + cont[f"{group_id}/sampling_rate"] = [sampling_rate] + miv_file.pack(miv_data, cont, logger=logger) + + miv_file.write( + f"{folder_path}/MiV_data.h5", + miv_data, + comp_type="gzip", + comp_opts=9, + logger=logger, + ) + + +if __name__ == "__main__": + main( + args=sys.argv[ + (seq_find(lambda x: os.path.basename(x) == script_name, sys.argv) + 1) : + ] + ) diff --git a/tests/core/test_spiketrain.py b/tests/core/test_spiketrain.py new file mode 100644 index 00000000..48fdc9a9 --- /dev/null +++ b/tests/core/test_spiketrain.py @@ -0,0 +1,7 @@ +from quantities import s + +from miv.core import SpikeTrain + + +def test_spiketrain_instantiation(): + assert len(SpikeTrain([1, 2, 3] * s, t_stop=10.0)[2:]) == 1 diff --git a/tests/io/mock_data.py b/tests/io/mock_data.py index 89b17410..dceab50d 100644 --- a/tests/io/mock_data.py +++ b/tests/io/mock_data.py @@ -97,7 +97,7 @@ def create_mock_data_structure(dirname, num_channels, signal_length): # Expected timestamps sampling_rate = 30000 - expected_timestamps = (timestamps - np.pi) / sampling_rate + expected_timestamps = timestamps / sampling_rate return dirname, expected_data, expected_timestamps, sampling_rate diff --git a/tests/io/test_binary_io.py b/tests/io/test_binary_io.py index e2cb4187..83a77d7f 100644 --- a/tests/io/test_binary_io.py +++ b/tests/io/test_binary_io.py @@ -85,21 +85,16 @@ def test_load_continuous_data_temp_file_with_timestamps_shift( fp[:] = signal[:] fp.flush() # Prepare timestamps.npy - timestamps = np.arange(signal_length) + np.pi + timestamps = (np.arange(signal_length) + np.pi).astype("float32") np.save(timestamps_filename, timestamps) - # With shift - raw_data, out_timestamps = load_continuous_data( - fp.filename, num_channels, freq, start_at_zero=False - ) - np.testing.assert_allclose(out_timestamps, timestamps / freq) - np.testing.assert_allclose(raw_data, signal) - # Without shift raw_data, out_timestamps = load_continuous_data( - fp.filename, num_channels, freq, start_at_zero=True + fp.filename, + num_channels, + freq, ) - np.testing.assert_allclose(out_timestamps, (timestamps - np.pi) / freq) + np.testing.assert_allclose(out_timestamps, timestamps / freq) np.testing.assert_allclose(raw_data, signal) @@ -168,7 +163,7 @@ def test_bits_to_voltage(): result = bits_to_voltage(signal, channel_info) 
expected_result = np.ones_like(signal) expected_result[:, 0] *= 5.0 * 1e6 - expected_result[:, 1] *= 3.0 * 1e3 * 1e6 + expected_result[:, 1] *= 3.0 * 1e3 expected_result[:, 2] *= 2.5 np.testing.assert_allclose(result, expected_result) @@ -177,8 +172,27 @@ def test_load_recording_readout_without_mask(create_mock_data_file): # TODO: Refactor into fixture mock data dirname, expected_data, expected_timestamps, sampling_rate = create_mock_data_file - out_data, out_timestamps, out_sampling_rate = load_recording(dirname) + out_data, out_timestamps, out_sampling_rate = load_recording( + dirname, start_at_zero=False + ) assert sampling_rate == out_sampling_rate np.testing.assert_allclose(out_data, expected_data) np.testing.assert_allclose(out_timestamps, expected_timestamps) + + +def test_load_recording_readout_without_mask_with_shift(create_mock_data_file): + # TODO: Refactor into fixture mock data + dirname, expected_data, expected_timestamps, sampling_rate = create_mock_data_file + + out_data, out_timestamps, out_sampling_rate = load_recording( + dirname, start_at_zero=True + ) + + assert sampling_rate == out_sampling_rate + np.testing.assert_allclose(out_data, expected_data) + np.testing.assert_allclose( + out_timestamps, + (sampling_rate * expected_timestamps - np.pi) / sampling_rate, + rtol=2e-7, + ) diff --git a/tests/io/test_file_read.py b/tests/io/test_file_read.py new file mode 100644 index 00000000..ed43b532 --- /dev/null +++ b/tests/io/test_file_read.py @@ -0,0 +1,151 @@ +import os +import time + +import h5py as h5 +import numpy as np +import pytest + +from miv.io import file as miv_file + + +def isEmpty(dictionary): + test = True + print(dictionary.keys()) + for key in dictionary.keys(): + print(key) + print(dictionary[key]) + print(type(dictionary[key])) + if dictionary[key] is None: + test = True + elif type(dictionary[key]) == list or type(dictionary[key]) == np.ndarray: + if len(dictionary[key]) > 0: + test = False + + return test + + +@pytest.fixture(name="mock_h5_file") +def fixture_mock_h5_file(tmp_path): + + filename = os.path.join(tmp_path, "MiV_TESTS.h5") + + data = miv_file.initialize() + + miv_file.create_group(data, "coordinates", counter="ncoords") + miv_file.create_dataset(data, ["px", "py", "pz"], group="coordinates", dtype=float) + + miv_file.create_dataset(data, ["u", "v"], group="electrodes", dtype=float) + + event = miv_file.create_container(data) + + for i in range(0, 10): + + ncoords = 5 + event["coordinates/ncoords"] = ncoords + + for n in range(ncoords): + event["coordinates/px"].append(np.random.random()) + event["coordinates/py"].append(np.random.random()) + event["coordinates/pz"].append(np.random.random()) + + event["electrodes/u"].append(np.random.random()) + event["electrodes/v"].append(np.random.random()) + + miv_file.pack(data, event) + + miv_file.write(filename, data, comp_type="gzip", comp_opts=9) + return filename + + +def test_read(mock_h5_file): + filename = mock_h5_file + + desired_datasets = ["coordinates", "electrodes"] + subset = 5 + + test_data, test_container = miv_file.read(filename, desired_datasets, subset) + + assert isinstance(test_data, dict) + assert isinstance(test_container, dict) + + assert isEmpty(test_container) + assert not isEmpty(test_data) + + # Testing desired_datasets + assert "coordinates/px" in test_data.keys() + + # Testing subsets + assert len(test_data["coordinates/ncoords"]) == 5 + + test_data, test_container = miv_file.read(filename, desired_datasets, 1000) + + assert len(test_data["coordinates/ncoords"]) == 10 + + # 
Passing in a range of subsets + subset = (0, 4) + test_data, test_container = miv_file.read(filename, desired_datasets, subset=subset) + assert len(test_data["coordinates/ncoords"]) == 4 + + subset = (1, 5) + test_data, test_container = miv_file.read(filename, desired_datasets, subset=subset) + assert len(test_data["coordinates/ncoords"]) == 4 + + subset = [1, 5] + test_data, test_container = miv_file.read(filename, desired_datasets, subset=subset) + assert len(test_data["coordinates/ncoords"]) == 4 + + # Test for poor uses of subset + test_data, test_container = miv_file.read(filename, desired_datasets, [0, 0]) + + assert len(test_data["_LIST_OF_DATASETS_"]) == 0 + assert len(test_container.keys()) == 0 + + test_data, test_container = miv_file.read(filename, desired_datasets, [10, 0]) + + assert len(test_data["_LIST_OF_DATASETS_"]) == 0 + assert len(test_container.keys()) == 0 + + test_data, test_container = miv_file.read(filename, desired_datasets, subset=0) + + assert len(test_data["_LIST_OF_DATASETS_"]) == 0 + assert len(test_container.keys()) == 0 + + +def test_unpack(mock_h5_file): + filename = mock_h5_file + + # This assumes you run nosetests from the h5hep directory and not + # the tests directory. + desired_datasets = ["coordinates", "electrodes"] + subset = 10 + + container, data = miv_file.read(filename, desired_datasets, subset) + + miv_file.unpack(data, container) + + assert not isEmpty(container) + + +def test_get_ncontainers_in_file(mock_h5_file): + filename = mock_h5_file + + ncontainers = miv_file.get_ncontainers_in_file(filename) + + assert ncontainers == 10 + + +def test_get_file_metadata(mock_h5_file): + filename = mock_h5_file + + metadata = miv_file.get_file_metadata(filename) + + assert "date" in metadata + assert "h5py_version" in metadata + assert "numpy_version" in metadata + assert "python_version" in metadata + + # Check default attributes are strings + assert isinstance(metadata["date"], str) + assert isinstance(metadata["h5py_version"], str) + assert isinstance(metadata["numpy_version"], str) + assert isinstance(metadata["python_version"], str) diff --git a/tests/io/test_file_write.py b/tests/io/test_file_write.py new file mode 100644 index 00000000..740c755f --- /dev/null +++ b/tests/io/test_file_write.py @@ -0,0 +1,193 @@ +import os +import sys +import time + +import h5py as h5 +import numpy as np +import pytest + +from miv.io import file as miv_file +from tests.io.test_file_read import fixture_mock_h5_file + + +def isEmpty(dictionary): + test = True + print(dictionary.keys()) + for key in dictionary.keys(): + if dictionary[key] is None: + test = True + elif type(dictionary[key]) == list or type(dictionary[key]) == np.ndarray: + if len(dictionary[key]) > 0: + test = False + + return test + + +def test_initialize(): + + test_data = miv_file.initialize() + + assert isinstance(test_data, dict) + + +def test_clear_container(mock_h5_file): + filename = mock_h5_file + + desired_datasets = ["coordinates", "electrodes"] + subset = 1000 + + data, container = miv_file.read(filename, desired_datasets, subset) + + miv_file.clear_container(container) + + assert isEmpty(container) + + +def test_create_container(): + + data = miv_file.initialize() + + miv_file.create_group(data, "coordinates", counter="ncoords") + miv_file.create_dataset(data, ["px", "py", "pz"], group="coordinates", dtype=float) + + miv_file.create_group(data, "electrodes", counter="nelectrodes") + miv_file.create_dataset(data, ["u", "v"], group="electrodes", dtype=float) + + test_container = 
miv_file.create_container(data) + + assert not isEmpty(test_container) + assert isinstance(test_container, dict) + + +def test_create_group(): + + data = miv_file.initialize() + miv_file.create_group(data, "coordinates", counter="ncoords") + + assert not isEmpty(data["_GROUPS_"]) + assert "coordinates/ncoords" in data.keys() + + miv_file.create_group(data, "test/slash", counter="ntest/slash") + + assert "test-slash" in data["_GROUPS_"] + assert "test-slash/ntest-slash" in data.keys() + + +def test_pack(): + + data = miv_file.initialize() + miv_file.create_group(data, "obj", counter="nobj") + miv_file.create_dataset(data, ["myfloat"], group="obj", dtype=float) + miv_file.create_dataset(data, ["myint"], group="obj", dtype=int) + miv_file.create_dataset(data, ["mystr"], group="obj", dtype=str) + + container = miv_file.create_container(data) + + # Normal packing test + + for i in range(5): + container["obj/myfloat"].append(2.0) + container["obj/myint"].append(2) + container["obj/mystr"].append("two") + container["obj/nobj"] = 5 + + test = miv_file.pack(data, container) + assert test == 0 + assert len(data["obj/myfloat"]) == 5 + assert len(data["obj/myint"]) == 5 + assert len(data["obj/mystr"]) == 5 + assert data["obj/nobj"][0] == 5 + + assert len(container["obj/myfloat"]) == 0 + assert len(container["obj/myint"]) == 0 + assert len(container["obj/mystr"]) == 0 + assert container["obj/nobj"] == 0 + + # AUTO_SET_COUNTER = False + container["obj/myfloat"].append(2.0) + container["obj/myint"].append(2) + container["obj/mystr"].append("two") + + # Is the mistake propagated? + container["obj/nobj"] = 2 + + miv_file.pack(data, container, AUTO_SET_COUNTER=False) + assert data["obj/nobj"][1] == 2 + + # Fix mistake + data["obj/nobj"][1] = 2 + + # STRICT_CHECKING = True + container["obj/myfloat"].append(2.0) + container["obj/myint"].append(2) + # 1 != 0, strict checking should fail. + + test = 0 + try: + miv_file.pack(data, container, STRICT_CHECKING=True) + except RuntimeError: + test = -1 + + # Was the mistake caught? + assert test == -1 + # Was nothing packed? + assert len(data["obj/myint"]) == 6 + # Is container not cleared? 
+ assert not isEmpty(container) + + # EMPTY_OUT_CONTAINER = False + + container["obj/mystr"].append("two") + + miv_file.pack(data, container, EMPTY_OUT_CONTAINER=False) + + assert not isEmpty(container) + + # assert type(data['obj/mystr'][0]) is str + + +def test_create_dataset(): + + data = miv_file.initialize() + miv_file.create_group(data, "coordinates", counter="ncoords") + miv_file.create_dataset(data, ["px", "py", "pz"], group="coordinates", dtype=float) + miv_file.create_dataset(data, ["e"], group="coordinates", dtype=int) + + assert not isEmpty(data["_GROUPS_"]) + assert "coordinates/ncoords" in data.keys() + assert "coordinates/px" in data.keys() + assert "coordinates/e" in data["_MAP_DATASETS_TO_COUNTERS_"].keys() + assert data["_MAP_DATASETS_TO_COUNTERS_"]["coordinates/e"] == "coordinates/ncoords" + assert data["_MAP_DATASETS_TO_DATA_TYPES_"]["coordinates/px"] == float + assert data["_MAP_DATASETS_TO_DATA_TYPES_"]["coordinates/e"] == int + + +def test_write_metadata(mock_h5_file): + + filename = mock_h5_file + file = h5.File(filename, "r") + + # Check default attribute existence + assert "date" in file.attrs.keys() + # assert 'miv_file_version' in file.attrs.keys() + assert "h5py_version" in file.attrs.keys() + assert "numpy_version" in file.attrs.keys() + assert "python_version" in file.attrs.keys() + + # Check default attributes are strings + assert isinstance(file.attrs["date"], str) + # assert isinstance(file.attrs['miv_file_version'], str) + assert isinstance(file.attrs["h5py_version"], str) + assert isinstance(file.attrs["numpy_version"], str) + assert isinstance(file.attrs["python_version"], str) + + file.close() + + # Adding a new attribute + miv_file.write_metadata(filename, {"author": "John Doe"}) + file = h5.File(filename, "r") + + assert "author" in file.attrs.keys() + assert file.attrs["author"] == "John Doe" + + file.close() diff --git a/tests/statistics/test_fano_factor.py b/tests/statistics/test_fano_factor.py index 97380557..20ed3ae7 100644 --- a/tests/statistics/test_fano_factor.py +++ b/tests/statistics/test_fano_factor.py @@ -34,3 +34,6 @@ def test_fano_factor_output(): with np.testing.assert_raises(AssertionError): output = fano_factor(seg1.spiketrains, 0, 0, 10, 10) # The function above should throw an error since there are no spike to compute variance and mean + with np.testing.assert_raises(AssertionError): + output = fano_factor(seg1.spiketrains, 0, 0, 0, 10) + # The function above should throw an error since start time cannot be same or greater than end time diff --git a/tests/statistics/test_info_theory.py b/tests/statistics/test_info_theory.py new file mode 100644 index 00000000..930668fe --- /dev/null +++ b/tests/statistics/test_info_theory.py @@ -0,0 +1,95 @@ +import numpy as np +import pytest +from neo.core import Segment, SpikeTrain + +# Test set For Info_Theory module +from miv.statistics import ( + active_information, + block_entropy, + conditional_entropy, + entropy_rate, + mutual_information, + relative_entropy, + shannon_entropy, + transfer_entropy, +) + +seg = Segment(index=1) +train0 = SpikeTrain( + times=[0, 1, 2], + units="sec", + t_stop=3, +) +seg.spiketrains.append(train0) + + +def test_shannon_entropy_output(): + with np.testing.assert_raises(AssertionError): + output = shannon_entropy(seg.spiketrains, 0, 0, 0, 0.1) + with np.testing.assert_raises(AssertionError): + output = shannon_entropy(seg.spiketrains, 0, 0, 1, 0) + output = shannon_entropy(seg.spiketrains, 0, 0, 1, 1) + np.testing.assert_allclose(output, 1.0) + + +def 
test_block_entropy_output(): + with np.testing.assert_raises(AssertionError): + output = block_entropy(seg.spiketrains, 0, 1, 0, 0, 0.1) + with np.testing.assert_raises(AssertionError): + output = block_entropy(seg.spiketrains, 0, 1, 0, 1, 0) + output = block_entropy(seg.spiketrains, 0, 1, 0, 1, 1) + np.testing.assert_allclose(output, 0.0) + + +def test_entropy_rate_output(): + with np.testing.assert_raises(AssertionError): + output = entropy_rate(seg.spiketrains, 0, 1, 0, 0, 0.1) + with np.testing.assert_raises(AssertionError): + output = entropy_rate(seg.spiketrains, 0, 1, 0, 1, 0) + output = entropy_rate(seg.spiketrains, 0, 1, 0, 1, 1) + np.testing.assert_allclose(output, 0.0) + + +def test_active_information_output(): + with np.testing.assert_raises(AssertionError): + output = active_information(seg.spiketrains, 0, 1, 0, 0, 0.1) + with np.testing.assert_raises(AssertionError): + output = active_information(seg.spiketrains, 0, 1, 0, 1, 0) + output = active_information(seg.spiketrains, 0, 1, 0, 1, 1) + np.testing.assert_allclose(output, 0.0) + + +def test_mutual_information_output(): + with np.testing.assert_raises(AssertionError): + output = mutual_information(seg.spiketrains, 0, 0, 0, 0, 0.1) + with np.testing.assert_raises(AssertionError): + output = mutual_information(seg.spiketrains, 0, 0, 0, 1, 0) + output = mutual_information(seg.spiketrains, 0, 0, 0, 1, 1) + np.testing.assert_allclose(output, 0.0) + + +def test_relative_entropy_output(): + with np.testing.assert_raises(AssertionError): + output = relative_entropy(seg.spiketrains, 0, 0, 0, 0, 0.1) + with np.testing.assert_raises(AssertionError): + output = relative_entropy(seg.spiketrains, 0, 0, 0, 1, 0) + output = relative_entropy(seg.spiketrains, 0, 0, 0, 1, 1) + np.testing.assert_allclose(output, 0.0) + + +def test_conditional_entropy_output(): + with np.testing.assert_raises(AssertionError): + output = conditional_entropy(seg.spiketrains, 0, 0, 0, 0, 0.1) + with np.testing.assert_raises(AssertionError): + output = conditional_entropy(seg.spiketrains, 0, 0, 0, 1, 0) + output = conditional_entropy(seg.spiketrains, 0, 0, 0, 1, 1) + np.testing.assert_allclose(output, 0.0) + + +def test_transfer_entropy_output(): + with np.testing.assert_raises(AssertionError): + output = transfer_entropy(seg.spiketrains, 0, 0, 1, 0, 0, 0.1) + with np.testing.assert_raises(AssertionError): + output = transfer_entropy(seg.spiketrains, 0, 0, 1, 0, 1, 0) + output = transfer_entropy(seg.spiketrains, 0, 0, 1, 0, 1, 1) + np.testing.assert_allclose(output, 0.0) diff --git a/tests/statistics/test_spikestamps_statistics.py b/tests/statistics/test_spikestamps_statistics.py index e997be37..2d49b492 100644 --- a/tests/statistics/test_spikestamps_statistics.py +++ b/tests/statistics/test_spikestamps_statistics.py @@ -1,9 +1,10 @@ import numpy as np import pytest import quantities as pq -from neo.core import SpikeTrain +from neo.core import Segment, SpikeTrain from miv.statistics.spiketrain_statistics import ( + binned_spiketrain, firing_rates, interspike_intervals, peri_stimulus_time, @@ -53,3 +54,21 @@ def test_interspike_interval_neo(spikestamps, true_interval): for spikestamp, interval in zip(spikestamps, true_interval): result = interspike_intervals(spikestamp) np.testing.assert_allclose(result.magnitude, interval) + + +def test_binned_spiketrain(): + seg = Segment(index=1) + train0 = SpikeTrain( + times=[0.1, 1.2, 1.3, 1.4, 1.5, 1.6, 4, 5, 5.1, 5.2, 8, 9.5], + units="sec", + t_stop=10, + ) + seg.spiketrains.append(train0) + with 
np.testing.assert_raises(AssertionError): + output = binned_spiketrain(seg.spiketrains, 0, 0, 0, 0.1) + # start time must be less than end time + with np.testing.assert_raises(AssertionError): + output = binned_spiketrain(seg.spiketrains, 0, 0, 5, 0) + # bin_size cannot be negative + output = binned_spiketrain(seg.spiketrains, 0, 2, 5, 1) + np.testing.assert_allclose(output, [0, 0, 1, 1]) diff --git a/tests/visualization/test_plot_connectivity.py b/tests/visualization/test_plot_connectivity.py index 458e1741..5825f91f 100644 --- a/tests/visualization/test_plot_connectivity.py +++ b/tests/visualization/test_plot_connectivity.py @@ -12,8 +12,10 @@ def test_plot_connectivity_output(): # Initialize the spiketrain as below mea_map = np.array([[1, 2], [3, 4], [5, 6]]) mea_map1 = np.array([[0, 0], [0, 0], [0, 0]]) + mea_map_t = np.array([[1, 2]]) connectivity_matrix1 = np.array([[2, 3], [10, 11]]) connectivity_matrix2 = np.array([[2, 3], [5, 8], [9, 10]]) + connectivity_matrix_t = np.array([[0, 0.1], [0.1, 0]]) with pytest.raises(AssertionError): plot_connectivity(mea_map, connectivity_matrix1, False) @@ -24,6 +26,10 @@ def test_plot_connectivity_output(): with pytest.raises(AssertionError): plot_connectivity(mea_map1, connectivity_matrix2, True) # The function above should throw an error since MEA map contains no identification + output = plot_connectivity(mea_map_t, connectivity_matrix_t, False) + assert output.engine == "neato" + output = plot_connectivity(mea_map_t, connectivity_matrix_t, True) + assert output.engine == "neato" def test_plot_connectivity_interactive_output(): @@ -31,8 +37,10 @@ def test_plot_connectivity_interactive_output(): # Initialize the spiketrain as below mea_map = np.array([[1, 2], [3, 4], [5, 6]]) mea_map1 = np.array([[0, 0], [0, 0], [0, 0]]) + mea_map_t = np.array([[1, 2]]) connectivity_matrix1 = np.array([[2, 3], [10, 11]]) connectivity_matrix2 = np.array([[2, 3], [5, 8], [9, 10]]) + connectivity_matrix_t = np.array([[0, 0.1], [0.1, 0]]) with pytest.raises(AssertionError): plot_connectivity_interactive(mea_map, connectivity_matrix1, False) @@ -43,3 +51,8 @@ def test_plot_connectivity_interactive_output(): with pytest.raises(AssertionError): plot_connectivity_interactive(mea_map1, connectivity_matrix2, True) # The function above should throw an error since MEA map contains no identification + + output = plot_connectivity_interactive(mea_map_t, connectivity_matrix_t, False) + assert output.get_edges()[0]["width"] == "1" + output = plot_connectivity_interactive(mea_map_t, connectivity_matrix_t, True) + assert output.get_edges()[0]["width"] == "1"
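For reference, the HDF5 container workflow introduced by this patch in `miv.io.file` can be summarized in a short sketch. This is assembled purely from the test fixtures in `tests/io/test_file_read.py` and `tests/io/test_file_write.py` above; the group and dataset names and the output path are illustrative placeholders, not part of the API.

```python
import numpy as np

from miv.io import file as miv_file

# Declare the layout: a group with a counter and three float datasets.
data = miv_file.initialize()
miv_file.create_group(data, "coordinates", counter="ncoords")
miv_file.create_dataset(data, ["px", "py", "pz"], group="coordinates", dtype=float)

# Fill a container per recording/event and pack it into the data dictionary.
container = miv_file.create_container(data)
for _ in range(10):
    ncoords = 5
    container["coordinates/ncoords"] = ncoords
    for _ in range(ncoords):
        container["coordinates/px"].append(np.random.random())
        container["coordinates/py"].append(np.random.random())
        container["coordinates/pz"].append(np.random.random())
    miv_file.pack(data, container)

# Write the packed data to HDF5, then read a subset of containers back.
miv_file.write("MiV_example.h5", data, comp_type="gzip", comp_opts=9)
read_data, read_container = miv_file.read("MiV_example.h5", ["coordinates"], subset=5)
```

The new `scripts/convert_open_ephys_to_miv.py` follows the same pattern, creating one group per Open Ephys recording and packing its signal, timestamps, and sampling rate.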