From 6b49b5eeb1deb47e5b62efbf25829b8f3ea81852 Mon Sep 17 00:00:00 2001 From: Eve-ning Date: Wed, 26 Jun 2024 19:45:41 +0800 Subject: [PATCH] Make .env non-mandatory --- .../topics/Get-Started-with-Dev-Containers.md | 5 + Writerside/topics/Getting-Started.md | 260 +++++++++--------- docs/HelpTOC.json | 2 +- docs/custom-k-aug-dataloaders.html | 8 +- docs/get-started-with-dev-containers.html | 6 +- docs/getting-started.html | 44 +-- docs/load-dataset.html | 10 +- docs/load-gcs.html | 8 +- docs/mix-match-module.html | 14 +- docs/mix-match.html | 4 +- docs/ml-architecture.html | 4 +- docs/model-test-chestnut-may-dec.html | 4 +- docs/overview.html | 4 +- docs/preprocessing-extract-segments.html | 18 +- docs/preprocessing-glcm-padded.html | 6 +- docs/preprocessing-morphology.html | 8 +- docs/preprocessing-scale.html | 6 +- docs/retrieve-our-datasets.html | 22 +- src/frdc/conf.py | 5 +- 19 files changed, 225 insertions(+), 213 deletions(-) diff --git a/Writerside/topics/Get-Started-with-Dev-Containers.md b/Writerside/topics/Get-Started-with-Dev-Containers.md index 750bead5..342721f5 100644 --- a/Writerside/topics/Get-Started-with-Dev-Containers.md +++ b/Writerside/topics/Get-Started-with-Dev-Containers.md @@ -47,3 +47,8 @@ steps such as: - Google Cloud Application Default Credentials - Weight & Bias API Key - Label Studio API Key + +> You can set the API Keys in the `.env` file in the root of the project. +> Be careful not to commit the `.env` file to the repository, which should +> have been ignored by default. +{style='note'} \ No newline at end of file diff --git a/Writerside/topics/Getting-Started.md b/Writerside/topics/Getting-Started.md index c62ee26f..746c93f8 100644 --- a/Writerside/topics/Getting-Started.md +++ b/Writerside/topics/Getting-Started.md @@ -1,155 +1,161 @@ # Getting Started -> Want to use a Dev Container? See [Get Started with Dev Containers](Get-Started-with-Dev-Containers.md) +> Want to use a Dev Container? +> See [Get Started with Dev Containers](Get-Started-with-Dev-Containers.md) - Ensure that you have the right version of Python. - The required Python version can be seen in pyproject.toml - - [tool.poetry.dependencies] - python = "..." - - - Start by cloning our repository. - - git clone https://github.com/FR-DC/FRDC-ML.git - - - Then, create a Python Virtual Env pyvenv - - - python -m venv venv/ - - - python3 -m venv venv/ - - - - - Install Poetry - Then check if it's installed with - poetry --version - - If poetry is not found, it's likely not in the user PATH. - - - Activate the virtual environment - - + Ensure that you have the right version of Python. + The required Python version can be seen in pyproject.toml + + [tool.poetry.dependencies] + python = "..." + + + Start by cloning our repository. + + git clone https://github.com/FR-DC/FRDC-ML.git + + + Then, create a Python Virtual Env pyvenv + + + python -m venv venv/ + + + python3 -m venv venv/ + + + + + Install Poetry + Then check if it's installed with + poetry --version + + If poetry is not found, it's likely not in the user PATH. + + + Activate the virtual environment + + - cd venv/Scripts - activate - cd ../.. + cd venv/Scripts + activate + cd ../.. - - + + - source venv/bin/activate + source venv/bin/activate - - - - Install the dependencies. You should be in the same directory as - pyproject.toml - - poetry install --with dev - - - Install Pre-Commit Hooks - - pre-commit install - - + + + + Install the dependencies. 
You should be in the same directory as + pyproject.toml + + poetry install --with dev + + + Install Pre-Commit Hooks + + pre-commit install + + - - We use Google Cloud to store our datasets. To set up Google Cloud, - - install the Google Cloud CLI - - - - Then, - - authenticate your account - . - gcloud auth login - - - Finally, - - set up Application Default Credentials (ADC) - . - gcloud auth application-default login - - - To make sure everything is working, run the tests. - + + We use Google Cloud to store our datasets. To set up Google Cloud, + + install the Google Cloud CLI + + + + Then, + + authenticate your account + . + gcloud auth login + + + Finally, + + set up Application Default Credentials (ADC) + . + gcloud auth application-default login + + + To make sure everything is working, run the tests. + - This is only necessary if any task requires Label Studio annotations - - We use Label Studio to annotate our datasets. - We won't go through how to install Label Studio, for contributors, it - should be up on localhost:8080. - - - Then, retrieve your own API key from Label Studio. - Go to your account page - and copy the API key.
- Set your API key as an environment variable. - - + This is only necessary if any task requires Label Studio annotations + + We use Label Studio to annotate our datasets. + We won't go through how to install Label Studio, for contributors, it + should be up on localhost:8080. + + + Then, retrieve your own API key from Label Studio. + Go to your account page + and copy the API key.
+ Set your API key as an environment variable. + + In Windows, go to "Edit environment variables for your account" and add this as a new environment variable with name LABEL_STUDIO_API_KEY. - - + + Export it as an environment variable. export LABEL_STUDIO_API_KEY=... - - - +
+ + In all cases, you can create a .env file in the root of + the project and add the following line: + LABEL_STUDIO_API_KEY=... + +
+
- - We use W&B to track our experiments. To set up W&B, - - install the W&B CLI - - - - Then, - - authenticate your account - . - wandb login - + + We use W&B to track our experiments. To set up W&B, + + install the W&B CLI + + + + Then, + + authenticate your account + . + wandb login + - This is optional but recommended. - Pre-commit hooks are a way to ensure that your code is formatted correctly. - This is done by running a series of checks before you commit your code. - - - - pre-commit install - - + This is optional but recommended. + Pre-commit hooks are a way to ensure that your code is formatted correctly. + This is done by running a series of checks before you commit your code. + + + + pre-commit install + + - - Run the tests to make sure everything is working - - pytest - - + + Run the tests to make sure everything is working + + pytest + + ## Troubleshooting @@ -174,13 +180,15 @@ See [Setting Up Google Cloud](#gcloud) ### Couldn't connect to Label Studio Label Studio must be running locally, exposed on `localhost:8080`. Furthermore, -you need to specify the `LABEL_STUDIO_API_KEY` environment variable. See +you need to specify the `LABEL_STUDIO_API_KEY` environment variable. See [Setting Up Label Studio](#ls) ### Cannot login to W&B -You need to authenticate your W&B account. See [Setting Up Weight and Biases](#wandb) -If you're facing difficulties, set the `WANDB_MODE` environment variable to `offline` +You need to authenticate your W&B account. +See [Setting Up Weight and Biases](#wandb) +If you're facing difficulties, set the `WANDB_MODE` environment variable +to `offline` to disable W&B. ## Our Repository Structure diff --git a/docs/HelpTOC.json b/docs/HelpTOC.json index 59ab5806..98bda536 100644 --- a/docs/HelpTOC.json +++ b/docs/HelpTOC.json @@ -1 +1 @@ -{"entities":{"pages":{"Overview":{"id":"Overview","title":"Overview","url":"overview.html","level":0,"tabIndex":0},"ML-Architecture":{"id":"ML-Architecture","title":"ML Architecture","url":"ml-architecture.html","level":0,"tabIndex":1},"Getting-Started":{"id":"Getting-Started","title":"Getting Started","url":"getting-started.html","level":0,"pages":["Get-Started-with-Dev-Containers"],"tabIndex":2},"Get-Started-with-Dev-Containers":{"id":"Get-Started-with-Dev-Containers","title":"Get Started with Dev Containers","url":"get-started-with-dev-containers.html","level":1,"parentId":"Getting-Started","tabIndex":0},"-6vddrq_5799":{"id":"-6vddrq_5799","title":"Tutorials","level":0,"pages":["Retrieve-our-Datasets"],"tabIndex":3},"Retrieve-our-Datasets":{"id":"Retrieve-our-Datasets","title":"Retrieve our Datasets","url":"retrieve-our-datasets.html","level":1,"parentId":"-6vddrq_5799","tabIndex":0},"mix-match":{"id":"mix-match","title":"MixMatch","url":"mix-match.html","level":0,"pages":["mix-match-module","custom-k-aug-dataloaders"],"tabIndex":4},"mix-match-module":{"id":"mix-match-module","title":"MixMatch Module","url":"mix-match-module.html","level":1,"parentId":"mix-match","tabIndex":0},"custom-k-aug-dataloaders":{"id":"custom-k-aug-dataloaders","title":"Custom K-Aug Dataloaders","url":"custom-k-aug-dataloaders.html","level":1,"parentId":"mix-match","tabIndex":1},"-6vddrq_5804":{"id":"-6vddrq_5804","title":"Model Tests","level":0,"pages":["Model-Test-Chestnut-May-Dec"],"tabIndex":5},"Model-Test-Chestnut-May-Dec":{"id":"Model-Test-Chestnut-May-Dec","title":"Model Test Chestnut 
May-Dec","url":"model-test-chestnut-may-dec.html","level":1,"parentId":"-6vddrq_5804","tabIndex":0},"-6vddrq_5806":{"id":"-6vddrq_5806","title":"API","level":0,"pages":["load.dataset","load.gcs","preprocessing.scale","preprocessing.extract_segments","preprocessing.morphology","preprocessing.glcm_padded"],"tabIndex":6},"load.dataset":{"id":"load.dataset","title":"load.dataset","url":"load-dataset.html","level":1,"parentId":"-6vddrq_5806","tabIndex":0},"load.gcs":{"id":"load.gcs","title":"load.gcs","url":"load-gcs.html","level":1,"parentId":"-6vddrq_5806","tabIndex":1},"preprocessing.scale":{"id":"preprocessing.scale","title":"preprocessing.scale","url":"preprocessing-scale.html","level":1,"parentId":"-6vddrq_5806","tabIndex":2},"preprocessing.extract_segments":{"id":"preprocessing.extract_segments","title":"preprocessing.extract_segments","url":"preprocessing-extract-segments.html","level":1,"parentId":"-6vddrq_5806","tabIndex":3},"preprocessing.morphology":{"id":"preprocessing.morphology","title":"preprocessing.morphology","url":"preprocessing-morphology.html","level":1,"parentId":"-6vddrq_5806","tabIndex":4},"preprocessing.glcm_padded":{"id":"preprocessing.glcm_padded","title":"preprocessing.glcm_padded","url":"preprocessing-glcm-padded.html","level":1,"parentId":"-6vddrq_5806","tabIndex":5}}},"topLevelIds":["Overview","ML-Architecture","Getting-Started","-6vddrq_5799","mix-match","-6vddrq_5804","-6vddrq_5806"]} \ No newline at end of file +{"entities":{"pages":{"Overview":{"id":"Overview","title":"Overview","url":"overview.html","level":0,"tabIndex":0},"ML-Architecture":{"id":"ML-Architecture","title":"ML Architecture","url":"ml-architecture.html","level":0,"tabIndex":1},"Getting-Started":{"id":"Getting-Started","title":"Getting Started","url":"getting-started.html","level":0,"pages":["Get-Started-with-Dev-Containers"],"tabIndex":2},"Get-Started-with-Dev-Containers":{"id":"Get-Started-with-Dev-Containers","title":"Get Started with Dev Containers","url":"get-started-with-dev-containers.html","level":1,"parentId":"Getting-Started","tabIndex":0},"-6vddrq_6549":{"id":"-6vddrq_6549","title":"Tutorials","level":0,"pages":["Retrieve-our-Datasets"],"tabIndex":3},"Retrieve-our-Datasets":{"id":"Retrieve-our-Datasets","title":"Retrieve our Datasets","url":"retrieve-our-datasets.html","level":1,"parentId":"-6vddrq_6549","tabIndex":0},"mix-match":{"id":"mix-match","title":"MixMatch","url":"mix-match.html","level":0,"pages":["mix-match-module","custom-k-aug-dataloaders"],"tabIndex":4},"mix-match-module":{"id":"mix-match-module","title":"MixMatch Module","url":"mix-match-module.html","level":1,"parentId":"mix-match","tabIndex":0},"custom-k-aug-dataloaders":{"id":"custom-k-aug-dataloaders","title":"Custom K-Aug Dataloaders","url":"custom-k-aug-dataloaders.html","level":1,"parentId":"mix-match","tabIndex":1},"-6vddrq_6554":{"id":"-6vddrq_6554","title":"Model Tests","level":0,"pages":["Model-Test-Chestnut-May-Dec"],"tabIndex":5},"Model-Test-Chestnut-May-Dec":{"id":"Model-Test-Chestnut-May-Dec","title":"Model Test Chestnut 
May-Dec","url":"model-test-chestnut-may-dec.html","level":1,"parentId":"-6vddrq_6554","tabIndex":0},"-6vddrq_6556":{"id":"-6vddrq_6556","title":"API","level":0,"pages":["load.dataset","load.gcs","preprocessing.scale","preprocessing.extract_segments","preprocessing.morphology","preprocessing.glcm_padded"],"tabIndex":6},"load.dataset":{"id":"load.dataset","title":"load.dataset","url":"load-dataset.html","level":1,"parentId":"-6vddrq_6556","tabIndex":0},"load.gcs":{"id":"load.gcs","title":"load.gcs","url":"load-gcs.html","level":1,"parentId":"-6vddrq_6556","tabIndex":1},"preprocessing.scale":{"id":"preprocessing.scale","title":"preprocessing.scale","url":"preprocessing-scale.html","level":1,"parentId":"-6vddrq_6556","tabIndex":2},"preprocessing.extract_segments":{"id":"preprocessing.extract_segments","title":"preprocessing.extract_segments","url":"preprocessing-extract-segments.html","level":1,"parentId":"-6vddrq_6556","tabIndex":3},"preprocessing.morphology":{"id":"preprocessing.morphology","title":"preprocessing.morphology","url":"preprocessing-morphology.html","level":1,"parentId":"-6vddrq_6556","tabIndex":4},"preprocessing.glcm_padded":{"id":"preprocessing.glcm_padded","title":"preprocessing.glcm_padded","url":"preprocessing-glcm-padded.html","level":1,"parentId":"-6vddrq_6556","tabIndex":5}}},"topLevelIds":["Overview","ML-Architecture","Getting-Started","-6vddrq_6549","mix-match","-6vddrq_6554","-6vddrq_6556"]} \ No newline at end of file diff --git a/docs/custom-k-aug-dataloaders.html b/docs/custom-k-aug-dataloaders.html index 0d084f17..98a565ab 100644 --- a/docs/custom-k-aug-dataloaders.html +++ b/docs/custom-k-aug-dataloaders.html @@ -1,5 +1,5 @@ -Custom K-Aug Dataloaders | Documentation


Documentation 0.1.2 Help

Custom K-Aug Dataloaders

In MixMatch, implementing the data loading methods is quite unconventional.

  1. We need to load multiple augmented versions of the same image into the same batch.

  2. The labelled set is usually too small, causing a premature end to the epoch as it runs out of samples to draw from faster than the unlabelled set.

This can be rather tricky to implement in PyTorch. This tutorial will illustrate how we did it.

Loading Multiple Augmented Versions of the Same Image

See: frdc/load/dataset.py FRDCDataset.__getitem__

In MixMatch, a single train batch must consist of:

  1. A batch of labeled images

  2. K batches of unlabeled images

[Diagram: Get Batch → Labelled Batch → Aug Labelled Batch; Unlabelled Batch → (duplicated) → Aug Unl. Batch 1 ... Aug Unl. Batch i ... Aug Unl. Batch K]

Keep in mind that the unlabelled batch is a single batch of images, not separate draws of batches. It is then "duplicated" K times, and each copy is augmented differently.

Solution 1: Custom Dataset

To solve this, we need to understand the role of both a Dataset and a DataLoader.

  • A Dataset represents a collection of data, responsible for loading and returning something.

  • A DataLoader draws samples from a Dataset and returns batched samples.

The key here is that a Dataset is not limited to returning 1 sample at a time; we can make it return the K augmented versions of the same image.

[Diagram: Sample → Aug Sample 1 ... Aug Sample i ... Aug Sample K]

In code, this is done by subclassing the Dataset class and overriding the __getitem__ method.

from copy import deepcopy

def duplicate(x):
    # Return three copies of the sample; deepcopy avoids shared state.
    return x, deepcopy(x), deepcopy(x)

# Inside the Dataset subclass (the surrounding class definition is elided in this diff):
def __getitem__(self, index):
    x, y = self.dataset[index]
    return self.aug(x), y


In the above example, we have a Dataset that returns 3 duplicate versions of the same image. By leveraging this technique, we can create a Dataset that returns K augmented versions of the same image as a tuple, as sketched below.
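Below is a minimal sketch of such a wrapper. The class name KAugDataset and the aug callable are illustrative assumptions on our part, not names from the repository:

from copy import deepcopy

from torch.utils.data import Dataset


class KAugDataset(Dataset):
    def __init__(self, dataset, aug, k):
        self.dataset = dataset  # the wrapped base dataset
        self.aug = aug  # a callable augmentation, e.g. a torchvision transform
        self.k = k  # number of augmented copies per sample

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        x, y = self.dataset[index]
        # Deep-copy before augmenting so the k copies are independent.
        return tuple(self.aug(deepcopy(x)) for _ in range(self.k)), y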

Premature End of Epoch due to Small Labelled Set

See: frdc/train/frdc_datamodule.py

In MixMatch, the definition of an "epoch" is a bit different. Instead of implying that we have seen all the data once, it implies that we've drawn N batches. The N is referred to as the number of iterations per epoch.

Take, for example, a labelled set of numbers [1, 2, 3] and an unlabelled set [4, 5, 6, 7, 8, 9, 10]. With a batch size of 2, we'll run out of labelled samples after 2 iterations, but we'll still have 3 more iterations for the unlabelled set.

  • Draw 1: [1, 2], [4, 5]

  • Draw 2: [3], [6, 7].

  • Epoch ends.

Solution 2: Random Sampling

To fix this, instead of sequentially sampling the labelled set (and the unlabelled set), we can sample them randomly. This way, we can ensure that it never runs out.

  • Draw 1: [1, 3], [7, 5]

  • Draw 2: [2, 1], [4, 9]

  • Draw 3: [3, 2], [8, 6]

  • ... and so on.

Luckily, PyTorch's DataLoader supports random sampling. We just need to use RandomSampler instead of SequentialSampler (which is the default).

from torch.utils.data import DataLoader, RandomSampler

dl = DataLoader(
    dataset,
    sampler=RandomSampler(
        dataset,
        # draw exactly train_iters batches per "epoch"
        num_samples=batch_size * train_iters,
        replacement=False,
    )
)


This will ensure that the "epoch" ends when we've drawn train_iters batches.

Last modified: 26 June 2024
\ No newline at end of file diff --git a/docs/get-started-with-dev-containers.html b/docs/get-started-with-dev-containers.html index 5aa1c5f3..b0579d13 100644 --- a/docs/get-started-with-dev-containers.html +++ b/docs/get-started-with-dev-containers.html @@ -1,5 +1,5 @@ -Get Started with Dev Containers | Documentation


Documentation 0.1.2 Help

Get Started with Dev Containers

Dev Containers are a great way to get started with a project. They define all necessary dependencies and environments, so you can just start coding within the container.

In this article, we'll only go over the additional steps to set up our project. For more information on how to use Dev Containers, please refer to the official documentation for each IDE. Once you've set up the Dev Container, come back here to finish the setup:

Python Environment

The dev environment is already created and is managed by Anaconda /opt/conda/bin/conda. To activate the environment, run the following command:

conda activate base


Mark as Sources Root (Add to PYTHONPATH)

For import statements to work, you need to mark the src folder as the sources root. Optionally, also mark the tests folder as the tests root.

Additional Setup

Refer to the Getting Started guide for additional setup steps such as:

  • Google Cloud Application Default Credentials

  • Weights & Biases API Key

  • Label Studio API Key

Last modified: 26 June 2024
\ No newline at end of file diff --git a/docs/getting-started.html b/docs/getting-started.html index 375a039b..76619f33 100644 --- a/docs/getting-started.html +++ b/docs/getting-started.html @@ -1,5 +1,5 @@ -Getting Started | Documentation


Documentation 0.1.2 Help

Getting Started

Installing the Dev. Environment

  1. Ensure that you have the right version of Python. The required Python version can be seen in pyproject.toml

    [tool.poetry.dependencies]
    python = "..."
  2. Start by cloning our repository.

    git clone https://github.com/FR-DC/FRDC-ML.git
  3. Then, create a Python virtual environment (venv)

    python -m venv venv/
    python3 -m venv venv/
  4. Install Poetry. Then, check that it's installed with

    poetry --version
  5. Activate the virtual environment

    cd venv/Scripts
    activate
    cd ../..
    source venv/bin/activate
  6. Install the dependencies. You should be in the same directory as pyproject.toml

    poetry install --with dev
  7. Install Pre-Commit Hooks

    pre-commit install

Setting Up Google Cloud

  1. We use Google Cloud to store our datasets. To set up Google Cloud, install the Google Cloud CLI

  2. Then, authenticate your account.

    gcloud auth login
  3. Finally, set up Application Default Credentials (ADC).

    gcloud auth application-default login
  4. To make sure everything is working, run the tests.

Setting Up Label Studio

  1. We use Label Studio to annotate our datasets. We won't go through how to install Label Studio; for contributors, it should be up on localhost:8080.

  2. Then, retrieve your own API key from Label Studio. Go to your account page and copy the API key.


  3. Set your API key as an environment variable.

    In Windows, go to "Edit environment variables for your account" and add this as a new environment variable with name LABEL_STUDIO_API_KEY.

    Export it as an environment variable.

    export LABEL_STUDIO_API_KEY=...

    In all cases, you can create a .env file in the root of the project and add the following line: LABEL_STUDIO_API_KEY=...
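For instance, if the project reads the .env file with python-dotenv (an assumption here; the repository may load it differently), the key becomes visible as an ordinary environment variable:

import os

from dotenv import load_dotenv

load_dotenv()  # reads key=value pairs from .env into the process environment
api_key = os.environ["LABEL_STUDIO_API_KEY"]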

Setting Up Weights & Biases

  1. We use W&B to track our experiments. To set up W&B, install the W&B CLI

  2. Then, authenticate your account.

    wandb login

Pre-commit Hooks

  • pre-commit install

Running the Tests

  • Run the tests to make sure everything is working

    pytest

Troubleshooting

ModuleNotFoundError

It's likely that your src and tests directories are not in PYTHONPATH. To fix this, run the following command:

export PYTHONPATH=$PYTHONPATH:./src:./tests -


Or set it in your IDE; for example, IntelliJ allows setting directories as Source Roots.

google.auth.exceptions.DefaultCredentialsError

It's likely that you haven't authenticated your Google Cloud account. See Setting Up Google Cloud

Couldn't connect to Label Studio

Label Studio must be running locally, exposed on localhost:8080. Furthermore, you need to specify the LABEL_STUDIO_API_KEY environment variable. See Setting Up Label Studio

Cannot login to W&B

You need to authenticate your W&B account. See Setting Up Weights & Biases. If you're facing difficulties, set the WANDB_MODE environment variable to offline to disable W&B.

Our Repository Structure

Before starting development, take a look at our repository structure. This will help you understand where to put your code.

[Diagram: FRDC repository structure. src/frdc/ holds the core dependencies: ./load/ (dataset loaders), ./preprocess/ (preprocessing functions), ./train/ (train dependencies), ./models/ (model architectures). rsc/ holds resources: ./dataset_name/ (datasets). tests/ holds the tests. pyproject.toml and poetry.lock hold the repo dependencies.]
src/frdc/

Source Code for our package. These are the unit components of our pipeline.

rsc/

Resources. These are usually cached datasets.

tests/

PyTest tests. These are unit, integration, and model tests.

Unit, Integration, and Model Tests

We have 3 types of tests:

  • Unit Tests are usually small, single function tests.

  • Integration Tests are larger tests that test a mock pipeline.

  • Model Tests are the true production pipeline tests that will generate a model.

Where Should I Contribute?

Changing a small component

If you're changing a small component, such as an argument for preprocessing, a new model architecture, or a new configuration for a dataset, take a look at the src/frdc/ directory.

Adding a test

When adding a new component, you'll need to add a new test. Take a look at the tests/ directory.

Changing the model pipeline

If you're an ML researcher, you'll probably be changing the pipeline. Take a look at the tests/model_tests/ directory.

Adding a dependency

If you're adding a new dependency, use poetry add PACKAGE and commit the changes to pyproject.toml and poetry.lock.

Last modified: 26 June 2024
\ No newline at end of file diff --git a/docs/load-dataset.html b/docs/load-dataset.html index 7522b773..cdfafda1 100644 --- a/docs/load-dataset.html +++ b/docs/load-dataset.html @@ -1,5 +1,5 @@ -load.dataset | Documentation


Documentation 0.1.2 Help

load.dataset

Usage

Firstly, to load a dataset instance, you need to initialize a FRDCDataset object, providing the site, date, and version.

We recommend using the FRDCDatasetPreset module to load explicitly known datasets.

from frdc.load.preset import FRDCDatasetPreset

ds = FRDCDatasetPreset.chestnut_20201218()


Then, we can use the ds object to load objects of the dataset:

ar, order = ds._get_ar_bands()
d = ds._get_ar_bands_as_dict()
bounds, labels = ds._get_legacy_bounds_and_labels()
  • ar is a stacked NDArray of the hyperspectral bands of shape (H x W x C)

  • order is a list of strings, containing the names of the bands, ordered according to the channels of ar

  • d is a dictionary of the hyperspectral bands of shape (H x W), keyed by the band names

  • bounds is a list of bounding boxes, in the format of Rect, a namedtuple of x0, y0, x1, y1

  • labels is a list of strings, containing the labels of the bounding boxes, ordered according to bounds
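As a quick, illustrative sanity check of those return values (our own sketch, using only the calls shown above):

ar, order = ds._get_ar_bands()
assert ar.ndim == 3  # (H, W, C)
assert len(order) == ar.shape[-1]  # one band name per channel

d = ds._get_ar_bands_as_dict()
assert all(band.ndim == 2 for band in d.values())  # each band is (H, W)

bounds, labels = ds._get_legacy_bounds_and_labels()
assert len(bounds) == len(labels)  # one label per bounding box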

I can't find a dataset!

Some datasets, especially new ones, may be unregistered, and you must specify their exact site / date / version.

from frdc.load.dataset import FRDCDataset

ds = FRDCDataset(site="mysite", date="mydate", version="myversion")


See below for examples on how to format this.

  • site="ds", date="date", version="ver"

  • site="ds", date="date", version="ver/01/data"

  • site="ds", date="date", version=None

Last modified: 26 June 2024
\ No newline at end of file diff --git a/docs/load-gcs.html b/docs/load-gcs.html index aa9fd5e1..bf12b421 100644 --- a/docs/load-gcs.html +++ b/docs/load-gcs.html @@ -1,5 +1,5 @@ -load.gcs | Documentation


Documentation 0.1.2 Help

load.gcs

Usage

These are defined in the top-level load.gcs module.

list_gcs_datasets

Lists all datasets in the bucket as a DataFrame. This works by checking which folders have a specific file, which we call the anchor.

download

Downloads a file from Google Cloud Storage and returns the local file path.

open_file

Downloads and opens a file from Google Cloud Storage. Returns a file handle.

open_image

Downloads and returns the PIL image from Google Cloud Storage.

Pathing

The path to specify is relative to the bucket, which is frdc-ds by default.

For example this filesystem on GCS:

# On Google Cloud Storage
frdc-ds
├── chestnut_nature_park
│   └── 20201218
│       └── 90deg
│           └── bounds.json


To download bounds.json, use download(r"chestnut_nature_park/20201218/90deg/bounds.json"). By default, all files will be downloaded to PROJ_DIR/rsc/....

# On local filesystem
PROJ_DIR
├── rsc
│   └── chestnut_nature_park
│       └── 20201218
│           └── 90deg
│               └── bounds.json
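A minimal usage sketch, assuming download and open_file are importable from the frdc.load.gcs module this page documents:

from frdc.load.gcs import download, open_file

path = r"chestnut_nature_park/20201218/90deg/bounds.json"
local_path = download(path)  # downloads if needed, returns the local path
with open_file(path) as f:  # downloads, then returns a file handle
    print(f.read())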


Configuration

If you need granular control over

  • where the files are downloaded

  • the credentials used

  • the project used

  • the bucket used

Then edit conf.py.

GCS_CREDENTIALS

Google Cloud credentials.


A google.oauth2.service_account.Credentials object. See the object documentation for more information.

LOCAL_DATASET_ROOT_DIR

Local directory to download files to.


Path to a directory, or a Path object.

GCS_PROJECT_ID

Google Cloud project ID.


GCS_BUCKET_NAME

Google Cloud Storage bucket name.


Last modified: 26 June 2024
\ No newline at end of file diff --git a/docs/mix-match-module.html b/docs/mix-match-module.html index e9524435..98364a0b 100644 --- a/docs/mix-match-module.html +++ b/docs/mix-match-module.html @@ -1,5 +1,5 @@ -MixMatch Module | Documentation


Documentation 0.1.2 Help

MixMatch Module

See frdc/train/mixmatch_module.py.

Quick Recap

We will go over the essential parts of the code here. Before that, we revise some of the concepts that are used in the code.

Abstract Methods

In Python, we can define abstract methods using the abc module. Just like other OOP languages, abstract methods are methods that must be implemented by the child class.

For example:

from abc import ABC, abstractmethod

class MyAbstractClass(ABC):
    @abstractmethod
    def my_abstract_method(self):
        ...

class MyChildClass(MyAbstractClass):
    def my_abstract_method(self):
        print("Hello World!")


nn.Module & LightningModule

If you're unfamiliar with PyTorch, you should read the nn.Module Documentation.

nn.Module is the base class for all neural network modules in PyTorch, while LightningModule is a PyTorch Lightning class that extends nn.Module, providing additional functionality that reduces boilerplate code.

By implementing it as a LightningModule, we also enter the PyTorch Lightning ecosystem, which provides us with a lot of useful features such as logging, early stopping, and more.

What do we implement in a Module?

One key component that nn.Module requires is the model. So for example:

import torch.nn as nn

class MyModule(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = ...  # the actual model definition is elided in this diff

    def forward(self, x):
        return self.model(x)


PyTorch Lightning builds on top of it, requiring training_step and validation_step. Each "step" processes a batch of data, and the model is trained on it. So for example:

import torch.nn.functional as F
from pytorch_lightning import LightningModule

class MyModule(LightningModule):
    def __init__(self):
        ...

    # validation_step, elided in this diff, is typically analogous
    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        return loss


Usually, the training and validation steps are the same, but in some cases, such as MixMatch, they are different. In MixMatch, we not only use a different loss function for training, we also handle batches differently. The PyTorch Lightning framework allows us to separate the two, and implement them separately.

Model Embedded Preprocessing on_before_batch_transfer

In PyTorch Lightning, we can also inject a step before the batch is passed to the model. This is done by overriding the on_before_batch_transfer method.

[Diagram: Batch → on_before_batch_transfer → training_step / validation_step]

This allows us to do preprocessing on the batch, such as scaling the data, encoding the labels, and more.
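A minimal sketch of such a hook (the x_scaler and y_encoder attributes are our illustrative assumptions, not the module's exact names):

def on_before_batch_transfer(self, batch, dataloader_idx):
    x, y = batch
    # e.g. scale the features and encode the string labels here,
    # before the batch ever reaches the model
    return self.x_scaler(x), self.y_encoder(y)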

Custom EMA Update on_after_backward

We also leverage another hook, called on_after_backward. This hook is called after the backward pass, and allows us to do custom operations. In our case, we use it to update the EMA model.

[Diagram: Batch → training_step → on_after_backward → update_ema]
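For intuition, an EMA update over model parameters typically looks like the sketch below (an illustration, not the repository's exact code):

import torch


@torch.no_grad()
def update_ema(model, ema_model, decay=0.999):
    for p, ema_p in zip(model.parameters(), ema_model.parameters()):
        # exponential moving average: ema <- decay * ema + (1 - decay) * current
        ema_p.mul_(decay).add_(p, alpha=1 - decay)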

MixMatch

We recommend having tests/model_tests/chestnut_dec_may/train.py open while reading this section. It implements a real-world example of MixMatch.

As a summary:

  1. We learned what an abstract method is, and how to implement one

  2. We implement the model in LightningModule much like we would in nn.Module

  3. We implement on_before_batch_transfer to preprocess the batch

  4. Finally, we implement on_after_backward to update the EMA model

With the above in mind, let's look at the MixMatch implementation.

forward (abstract)

Forward pass of the model

ema_model (abstract)

The model that is used for EMA. We expect this property to be implemented by the child class.

update_ema (abstract)

The method to update the EMA model. We expect this method to be implemented by the child class.

loss_unl_scaler (static)

Takes in the current progress of the training, 0.0 to 1.0, where 0.0 is the start of the training, and 1.0 is the end. Then, returns the multiplier for the unlabeled loss.
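A common concrete choice, shown only as an illustration (the repository's schedule may differ), is a linear ramp-up:

def loss_unl_scaler(progress: float) -> float:
    # linearly ramp the unlabeled-loss weight from 0 up to its full value
    return progress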

loss_lbl (static)

Implements the loss for labeled data. Takes in the predicted labels and the ground truth labels, and returns the loss. This is cross entropy for MixMatch.

loss_unl (static)

Implements the loss for unlabeled data. Takes in the predicted labels and the ground truth labels, and returns the loss. This is MSE for MixMatch.

mixup

Takes in the data and the labels, the beta distribution parameter, and returns the mixed data and labels.
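As a sketch of the standard MixUp operation, assuming one-hot (or soft) label tensors; this is an illustration, not the module's exact code:

import torch


def mixup(x, y, beta_alpha):
    ratio = torch.distributions.Beta(beta_alpha, beta_alpha).sample()
    ratio = torch.max(ratio, 1 - ratio)  # keep the original sample dominant
    perm = torch.randperm(x.shape[0])
    x_mix = ratio * x + (1 - ratio) * x[perm]
    y_mix = ratio * y + (1 - ratio) * y[perm]
    return x_mix, y_mix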

sharpen

Takes in the labels and temperature, and returns the sharpened labels.
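Sharpening lowers the entropy of a predicted distribution; a common sketch (illustrative), where temperatures below 1 sharpen:

def sharpen(y_probs, temp):
    y = y_probs ** (1 / temp)  # raise each probability to 1/T
    return y / y.sum(dim=1, keepdim=True)  # re-normalise to sum to 1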

guess_labels

Takes in the unlabeled data, and returns the guessed labels.

progress

The current progress of the training, 0.0 to 1.0, where 0.0 is the start of the training, and 1.0 is the end.

training_step

The training step runs through 1 batch of data, and returns the loss. Note that this is significantly different from the validation step, as we handle the K-Augmented data differently.

test / validation_step

The test / validation step runs through 1 batch of data, and returns the loss.

predict_step

The predict step runs through 1 batch of data, and returns the actual decoded labels.

on_after_backward

The on_after_backward hook is called after the backward pass, and allows us to do custom operations. In our case, we use it to update the EMA model.

on_before_batch_transfer

The on_before_batch_transfer hook is called before the batch is transferred to the GPU. In our case, we use it to preprocess the batch.

A diagram of how these components interact with each other is shown below:

[Diagram: Batch → on_before_batch_transfer → training_step (guess_labels → sharpen → mix_up), where loss = loss_lbl + loss_unl_scaler * loss_unl → backward → on_after_backward → update_ema; on_before_batch_transfer → validation_step → loss]

Finally, we show an example of how to use the MixMatch module:

from sklearn.preprocessing import StandardScaler, OrdinalEncoder

from frdc.train.mixmatch_module import MixMatchModule

...

MixMatchModule(
    # model and transform arguments elided in this diff
    sharpen_temp=0.5,
    mix_beta_alpha=0.75,
)


In particular, we need to supply some transformations for the preprocessing step. In this case, we use StandardScaler to scale the data, and OrdinalEncoder to encode the labels.

  1. It's best if standardization is done only on the training data, and not the validation data to better fit real-world scenarios.

  2. We use OrdinalEncoder as it handles unseen labels. So if a class doesn't show up in the training data, it will be encoded as np.nan, and will not participate in the loss calculation.
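One way to get that behaviour from scikit-learn (the repository's exact encoder configuration is an assumption on our part):

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan)
enc.fit([["oak"], ["pine"]])
# a label unseen during fit encodes to np.nan instead of raising an error
print(enc.transform([["oak"], ["birch"]]))  # [[ 0.], [nan]]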

Design Choices

Static Method Overriding

We implement many functions as static, as we believe that a functional style reduces dependencies, thus making the code easier to test and debug.

Furthermore, it allows the subclasses to easily override the functions, to customize the behavior of the MixMatch module.

For example, the loss_unl_scaler function is static, thus, we can implement our own scaling function, and pass it to the MixMatch module.

def my_loss_unl_scaler(progress: float) -> float:
    return progress ** 2

class MyMixMatchModule(MixMatchModule):  # the subclass name is elided in this diff
    @staticmethod
    def loss_unl_scaler(progress: float) -> float:
        return my_loss_unl_scaler(progress)


If we had used a method instead, we would have to consider instance state, which would make it harder to override.

Why not use Dataclasses?

One of the biggest caveats of nn.Module is that it requires super().__init__() to be called before anything is assigned. While dataclass can leverage __post_init__ to do the same, we felt that this was too much of a hassle to save a few keystrokes. Thus, we opted to use __init__ instead; while more verbose, it is more explicit.

Why use PyTorch Lightning?

While we did hit some roadblocks implementing SSL, due to its complex and unconventional nature, we felt that the benefits of using PyTorch Lightning outweighed the cons.

on_before_batch_transfer and on_after_backward are unconventional hooks, and we had to do some digging to find them. It can be argued that by just writing explicit code, we can avoid the need for these hooks, but the PyTorch ecosystem fixes many other issues, so we accepted the trade-off.

References

Last modified: 26 June 2024
\ No newline at end of file diff --git a/docs/mix-match.html b/docs/mix-match.html index 18761d00..0b1edc52 100644 --- a/docs/mix-match.html +++ b/docs/mix-match.html @@ -1,5 +1,5 @@ -MixMatch | Documentation


Documentation 0.1.2 Help

MixMatch

In FRDC-ML, we leverage semi-supervised learning to improve the model's performance through better augmentation consistency and the use of even unlabelled data.

The algorithm we use is MixMatch, a state-of-the-art semi-supervised learning algorithm. It is based on the idea of consistency regularization, which encourages models to predict the same class even after augmentations that occur naturally in the real world.

Our implementation of MixMatch is a refactored version of YU1ut/MixMatch-pytorch. We've refactored the code to follow more modern PyTorch practices, allowing us to utilize it with modern PyTorch frameworks such as PyTorch Lightning.

We won't go through the details of MixMatch here; see Our Documentation in our MixMatch-PyTorch-CIFAR10 repository for more details.

Implementation Details

  1. How we implemented the MixMatch logic MixMatchModule

  2. How we implemented the unique MixMatch data loading logic Custom MixMatch Data Loading

References

Last modified: 26 June 2024
\ No newline at end of file diff --git a/docs/ml-architecture.html b/docs/ml-architecture.html index 3016f86f..e62735d0 100644 --- a/docs/ml-architecture.html +++ b/docs/ml-architecture.html @@ -1,5 +1,5 @@ -ML Architecture | Documentation

Documentation 0.1.2 Help

ML Architecture

The architecture is the backbone of the project. If you're interested in how everything is pieced together, this article is for you.

In Machine Learning architectures, we mostly care about 2 things: the data and the model. As the names imply, DataModules, DataLoaders, and Datasets deal with data, and Modules deal with model construction.

Data Classes

There's a small difference between the Data___ classes. Firstly, we load data in as Dataset instances, which are then preprocessed before being batched by the DataLoader and, finally, housed in the DataModule.

[Diagram: Data Source → Load → Dataset → Preprocess (Alternatives → Augmentations → Distortions → Cropping or Resizing → Scaling) → DataLoader → DataModule (Train / Validation / Test DataLoaders)]

There are 2 IMPORTANT design decisions here:

Dataset and DataLoader

Data in a Dataset are unbatched; data in a DataLoader must be batched. This means that it's possible to have jagged tensors at this stage; however, they must be made "stackable" before loading into the DataLoader.

For example, the data in a Dataset could be of shapes [(8, 200, 100), (8, 100, 300), ...], while, BEFORE loading into the DataLoader, they must have equal shapes, for example [(8, 100, 100), (8, 100, 100), ...].

This is because when you initialize a DataLoader you need to include the batch_size, which implies the data are stacked in some manner.

This also leads to the reason why preprocessing must happen before the DataLoader.

Preprocessing

Excluding functionalities to load the data, this is the step before the data is set in stone. So, steps such as augmentation, transformation, and even analytics need to be performed here, as the data is in its "rawest" form.

We use this step to

  1. Construct alternative augmentations, i.e. images that we could've taken instead.

  2. Using those alternatives, add distortions, i.e. unintentional changes to the photo that reduce quality.

  3. Crop or resize the image.

  4. Scale the data. e.g. Standard Scaling, ZCA Scaling, etc.

The order of the steps are choice by design.
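
A rough torchvision sketch mirroring steps 1-4; the transforms and normalization statistics here are placeholders, not the actual FRDC-ML pipeline:

import torch
from torchvision.transforms import v2

preprocess = v2.Compose([
    v2.RandomHorizontalFlip(p=0.5),  # 1. alternative augmentations
    v2.GaussianBlur(kernel_size=3),  # 2. distortions
    v2.RandomCrop(100),              # 3. cropping or resizing
    v2.ToDtype(torch.float32, scale=True),
    v2.Normalize(mean=[0.5] * 8, std=[0.25] * 8),  # 4. scaling (placeholder stats)
])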

Modules

We analyze the inheritance structure of the Modules (also the ML Models):

[Diagram: inheritance chain PyTorch Module → Lightning Module → FRDC Module → FixMatch Module / MixMatch Module → EfficientNetB1 FixMatch Module / EfficientNetB1 MixMatch Module. The first two are Library Modules; the rest are Custom Modules.]

Custom Modules are our self-defined classes.

  • FRDC Module: This is the base class for all our models; it implements common functionality, such as partial saving of unfrozen parameters.

  • Y Module: Y is the architecture/framework of the model. In our case, this only defines the method of training, not the actual model itself.

  • X Y Module: X defines the actual model being used within Y's framework.

To give an example, we look at EfficientNetB1FixMatchModule. Due to its naming scheme <Model><Framework>Module, we see that it's an EfficientNetB1 model used in the FixMatch framework.

Furthermore, because it's well decoupled, implementing a new model is as easy as overriding some defaults.
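
For instance, a hypothetical sketch of adding a new model; the FixMatchModule base here is a stand-in, and the repository's actual API may differ:

import pytorch_lightning as pl
import torch
from torchvision.models import resnet50

class FixMatchModule(pl.LightningModule):
    # Stand-in for the repository's base class described above.
    def __init__(self, n_classes: int):
        super().__init__()
        self.n_classes = n_classes
        self.backbone = torch.nn.Identity()  # default, meant to be overridden

class ResNet50FixMatchModule(FixMatchModule):
    # Only the backbone changes; the FixMatch training logic is inherited.
    def __init__(self, n_classes: int):
        super().__init__(n_classes=n_classes)
        self.backbone = resnet50(num_classes=n_classes)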

Last modified: 26 June 2024

Model Test Chestnut May-Dec

This test is used to evaluate the model performance on the Chestnut Nature Park May & December dataset.

See this script in model_tests/chestnut_dec_may/train.py.

Motivation

This model will be used to classify trees in unseen datasets under different conditions. In this test, we'll evaluate it under a different season.

A caveat is that it'll be evaluated on the same set of trees, so it's not representative of a field test. However, given the difficulty of collecting datasets, this still gives us a good preliminary idea of how the model will perform in different conditions.

Methodology

We train on the December dataset, and test on the May dataset.

[Diagram: the DecDataset supplies both the Labelled Train and Unlabelled Train data for the Model; the MayDataset is the Test set.]

Despite not having any true unlabelled data, we use MixMatch by treating the labelled data of the December dataset as unlabelled data.
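
Conceptually, this amounts to reusing the same December samples with their labels stripped. A sketch with placeholder names, not the actual test script:

from torch.utils.data import Dataset

class Unlabelled(Dataset):
    # Wraps a labelled dataset, discarding the labels.
    def __init__(self, ds: Dataset):
        self.ds = ds

    def __len__(self):
        return len(self.ds)

    def __getitem__(self, i):
        x, _ = self.ds[i]  # drop the label
        return x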

Model

The current model is a simple InceptionV3 transfer learning model, with the last layer replaced by fully connected layer(s).

SSL Loss
Input
InceptionV3 Frozen
FC Layer(s)
Softmax
Output

Preprocessing

For Training:

Segment → RandomCrop 299 → Horizontal Flip 50% → Vertical Flip 50% → Normalize By Training Mean & Std

For Validation:

Segment → CenterCrop 299 → Normalize By Training Mean & Std

For Evaluation:

Segment → CenterCrop 299 → Normalize By Training Mean & Std → {As Is, Horizontal Flip, Vertical Flip, Horizontal & Vertical Flip}

For evaluation, we test the model on the original image as well as its horizontally, vertically, and doubly flipped variants, since the model should be invariant to these flips.
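
A minimal sketch of such a flip-invariance check, assuming batched (B, C, H, W) tensors and a model callable; this is not the actual test code:

import torch

def predictions_under_flips(model, x: torch.Tensor) -> torch.Tensor:
    # Predictions for the original image and its horizontal, vertical,
    # and combined flips; an invariant model yields identical rows.
    variants = [
        x,
        torch.flip(x, dims=[-1]),      # horizontal flip
        torch.flip(x, dims=[-2]),      # vertical flip
        torch.flip(x, dims=[-2, -1]),  # both flips
    ]
    return torch.stack([model(v).argmax(dim=-1) for v in variants])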

Hyperparameters

The following hyperparameters are used (a rough training-setup sketch follows the list):

  • Optimizer: Adam

  • Learning Rate: 1e-3

  • Batch Size: 32

  • Epochs: 10

  • Train Iterations: 25~100

  • Validation Iterations: 10~25

  • Early Stopping: 4
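
These map onto a fairly standard PyTorch Lightning setup. A rough sketch, assuming a Lightning-based training script; the monitored metric and exact limits are placeholders:

import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping

trainer = pl.Trainer(
    max_epochs=10,
    limit_train_batches=100,  # train iterations, 25~100
    limit_val_batches=25,     # validation iterations, 10~25
    callbacks=[EarlyStopping(monitor="val_loss", patience=4)],
)
# Inside the LightningModule, the optimizer would be configured as:
#     torch.optim.Adam(self.parameters(), lr=1e-3)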

Results

We achieve around 40% accuracy on the test set, compared to 100% on the training set. This indicates that the model has saturated and is not able to learn any more from the training set. There's no indication of overfitting, as the validation loss simply plateaus.

W&B Dashboard

Caveats

  • The test set is very small, so the results are not very representative.

  • The test set is the same set of trees, so it's not a true test of the model performance in different conditions.

  • There are many classes with 1 sample, so the model may not be able to learn the features of these classes well.

Last modified: 26 June 2024

Overview

Forest Recovery Digital Companion (FRDC) is an ML-assisted companion for ecologists to automatically classify surveyed trees via an Unmanned Aerial Vehicle (UAV).

This package, FRDC-ML, is the Machine Learning backbone of this project: a centralized repository of tools and model architectures to be used in the FRDC pipeline.

Get started here

Other Projects

FRDC-UI

The User Interface Repository for FRDC, a WebApp GUI for ecologists to adjust annotations.

Last modified: 26 June 2024
\ No newline at end of file +}

Documentation 0.1.2 Help

Overview

Forest Recovery Digital Companion (FRDC) is a ML-assisted companion for ecologists to automatically classify surveyed trees via an Unmanned Aerial Vehicle (UAV).

This package, FRDC-ML is the Machine Learning backbone of this project, a centralized repository of tools and model architectures to be used in the FRDC pipeline.

Get started here

Other Projects

FRDC-UI

The User Interface Repository for FRDC, a WebApp GUI for ecologists to adjust annotations.

Last modified: 26 June 2024
\ No newline at end of file diff --git a/docs/preprocessing-extract-segments.html b/docs/preprocessing-extract-segments.html index e6922357..56f4caf3 100644 --- a/docs/preprocessing-extract-segments.html +++ b/docs/preprocessing-extract-segments.html @@ -1,5 +1,5 @@ -preprocessing.extract_segments | Documentation

Documentation 0.1.2 Help

preprocessing.extract_segments

Functions

extract_segments_from_labels

Extracts segments from a label classification.

extract_segments_from_bounds

Extracts segments from Rect bounds.

remove_small_segments_from_labels

Removes small segments from a label classification.

Extract with Boundaries

A boundary is a Rect object that represents the minimum bounding box of a segment, with x0, y0, x1, y1 coordinates.

It simply slices the original image to the bounding box. The origin is the top left corner of the image.

[Diagram (cropped): the original 3×3 image (bottom row 7, 8, 9) is sliced by the Rect bounds (1, 2, 0, 2 as x0, y0, x1, y1) into a smaller segmented image (bottom row 8, 9).]

[Diagram (not cropped): the same slice, but the segmented image keeps the original size, with out-of-bounds cells zeroed (bottom row 0, 8, 9).]
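
In NumPy terms, bounds extraction is plain slicing. A sketch with assumed example bounds, not the actual extract_segments_from_bounds internals:

import numpy as np

ar = np.arange(1, 10).reshape(3, 3)  # the 3x3 "original image" above
x0, y0, x1, y1 = 1, 0, 3, 3          # assumed example bounds
segment = ar[y0:y1, x0:x1]           # rows y0:y1, columns x0:x1
print(segment[-1])                   # [8 9], matching the diagram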

Extract with Labels

A label classification is a np.ndarray where each pixel is mapped to a segment. Each segment is mapped to a unique integer; in our project, the 0th label is the background.

For example, a label classification of 3 segments will look like this:

[Diagram: a 3×3 label classification (bottom row 1, 1, 0) shown beside the original image (bottom row 7, 8, 9).]

The extraction will take the minimum bounding box of each segment and return a list of segments.

For example, the extracted images for labels 1 and 2 will be:

[Diagram (cropped): each extracted segment is returned at its minimum bounding box size, e.g. Extracted Segment 1 ends with the row 7, 8.]

[Diagram (not cropped): each extracted segment is padded with 0s to the original image size, e.g. Extracted Segment 1's bottom row is 7, 8, 0 and Extracted Segment 2's is 0, 0, 0.]
  • If cropped is False, the segments are padded with 0s to the original image size. While this can ensure shape consistency, it can consume more memory for large images.

  • If cropped is True, the segments are cropped to the minimum bounding box. This can save memory, but the shape of the segments will be inconsistent.

Usage

Extract from Bounds and Labels

Extract segments from bounds and labels.


import numpy as np
from frdc.load.preset import FRDCDatasetPreset
from frdc.preprocess.extract_segments import extract_segments_from_bounds

ds = FRDCDatasetPreset.chestnut_20201218()
ar, order = ds._get_ar_bands()
bounds, labels = ds._get_legacy_bounds_and_labels()

segments: list[np.ndarray] = extract_segments_from_bounds(ar, bounds)

Extract from Auto-Segmentation

Extract segments from a label classification.

from skimage.morphology import remove_small_objects, remove_small_holes
import numpy as np

from frdc.load.preset import FRDCDatasetPreset
from frdc.preprocess.morphology import (
    threshold_binary_mask, binary_watershed
)
from frdc.preprocess.extract_segments import (
    extract_segments_from_labels, remove_small_segments_from_labels
)

ds = FRDCDatasetPreset.chestnut_20201218()
ar, order = ds._get_ar_bands()
mask = threshold_binary_mask(ar, order.index('NIR'), 90 / 256)
ar_labels = binary_watershed(mask)
ar_labels = remove_small_segments_from_labels(ar_labels,
                                              min_height=10, min_width=10)

segments: list[np.ndarray] = extract_segments_from_labels(ar, ar_labels)

API

extract_segments_from_labels(ar, ar_labels, cropped)

Extracts segments from a label classification.


ar_labels is a label classification as a np.ndarray

extract_segments_from_bounds(ar, bounds, cropped)

Extracts segments from Rect bounds.


bounds is a list of Rect bounds.

remove_small_segments_from_labels(ar_labels, min_height, min_width)

Removes small segments from a label classification.


Last modified: 26 June 2024

preprocessing.glcm_padded

Functions

glcm_padded

Computes the GLCM of the NDArray bands with padding.

glcm_padded_cached

Computes the GLCM of the NDArray bands with padding, and caches it.

append_glcm_padded_cached

Computes the GLCM of the NDArray bands with padding, caches it, and appends it onto the original array.

Usage

We show a few examples of how to use the GLCM functions.

import numpy as np
from glcm_cupy import Features

from frdc.preprocess.glcm_padded import (
    append_glcm_padded_cached, glcm_padded, glcm_padded_cached
)

ar = np.random.rand(50, 25, 4)

ar_glcm = glcm_padded(ar, bin_from=1, bin_to=4, radius=3)

ar_glcm_2_features = glcm_padded(ar, bin_from=1, bin_to=4, radius=3,
                                 features=(Features.CONTRAST,
                                           Features.CORRELATION))

ar_glcm_cached = glcm_padded_cached(ar, bin_from=1, bin_to=4, radius=3)

ar_glcm_cached_appended = append_glcm_padded_cached(ar, bin_from=1, bin_to=4,
                                                    radius=3)
  • ar_glcm is the GLCM of the original array, with the last dimension being the GLCM features. The number of features is determined by the features parameter, which defaults to all features.

  • ar_glcm_2_features selects only 2 features, with the last dimension being the 2 GLCM features specified.

  • ar_glcm_cached caches the GLCM so that if you call it again, it will return the cached version. It stores its data at the project root dir, under .cache/.

  • ar_glcm_cached_appended is a wrapper around glcm_padded_cached; it appends the GLCM features onto the original array. It's equivalent to calling glcm_padded_cached and then np.concatenate on the last axis.

Caching

GLCM is an expensive operation, thus we recommend caching it if the input parameters will be the same. This is especially useful if you're experimenting with the same dataset with constant parameters.

API

glcm_padded(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding.


  • ar is the input array

  • bin_from is the upper bound of the input

  • bin_to is the upper bound of the GLCM input, i.e. the resolution that GLCM operates on

  • radius is the radius of the GLCM

  • step_size is the step size of the GLCM

  • features is the list of GLCM features to compute

The return shape is (H, W, C, F), where F is the number of GLCM features.

See glcm_cupy for the GLCM Features.

glcm_padded_cached(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding, and caches it.


See glcm_padded for the parameters and output shape

append_glcm_padded_cached(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding, caches it, and appends it onto the original array.


See glcm_padded for the parameters


The return shape is (H, W, C × (F + 1)): the original C bands followed by the flattened C × F GLCM features.

The function automatically flattens the last 2 dimensions of the GLCM features, and appends it onto the original array.
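
In other words, the shape arithmetic works as follows. A sketch assuming an (H, W, C) input and F GLCM features, not the library's internals:

import numpy as np

H, W, C, F = 100, 100, 8, 7
ar = np.zeros((H, W, C))
ar_glcm = np.zeros((H, W, C, F))

# Flatten the last 2 dims (C, F) and append onto the original bands.
appended = np.concatenate([ar, ar_glcm.reshape(H, W, C * F)], axis=-1)
print(appended.shape)  # (100, 100, 8 + 8 * 7) = (100, 100, 64)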

Last modified: 26 June 2024

preprocessing.morphology

Functions

threshold_binary_mask

Thresholds a selected NDArray band to yield a binary mask.

binary_watershed

Performs watershed on a binary mask to yield a mapped label classification.

Usage

Perform auto-segmentation on a dataset to yield a label classification.


from frdc.load.preset import FRDCDatasetPreset
from frdc.preprocess.morphology import (
    threshold_binary_mask, binary_watershed
)

ds = FRDCDatasetPreset.chestnut_20201218()
ar, order = ds._get_ar_bands()
mask = threshold_binary_mask(ar, order.index('NIR'), 90 / 256)
ar_label = binary_watershed(mask)

API

threshold_binary_mask(ar, band_idx, threshold_value)

Thresholds a selected NDArray band to yield a binary mask as np.ndarray.


This is equivalent to

ar[..., band_idx] > threshold_value
binary_watershed(ar_mask, peaks_footprint, watershed_compactness)

Performs watershed on a binary mask to yield a mapped label classification as a np.ndarray.


  • peaks_footprint is the footprint of skimage.feature.peak_local_max

  • watershed_compactness is the compactness of skimage.morphology.watershed
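
For intuition, a sketch of the general skimage recipe such a function typically wraps; the actual implementation may differ:

import numpy as np
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

def binary_watershed_sketch(ar_mask: np.ndarray,
                            peaks_footprint: int = 10,
                            compactness: float = 0.0) -> np.ndarray:
    # Peaks of the distance transform seed the watershed basins.
    distance = ndimage.distance_transform_edt(ar_mask)
    peaks = peak_local_max(distance,
                           footprint=np.ones((peaks_footprint,) * 2),
                           labels=ar_mask.astype(int))
    markers = np.zeros_like(ar_mask, dtype=int)
    markers[tuple(peaks.T)] = np.arange(1, len(peaks) + 1)
    return watershed(-distance, markers, mask=ar_mask,
                     compactness=compactness)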

Last modified: 26 June 2024

preprocessing.scale

Functions

scale_0_1_per_band

Scales the NDArray bands to [0, 1] per band.

scale_normal_per_band

Scales the NDArray bands to zero mean unit variance per band.

scale_static_per_band

Scales the NDArray bands by a predefined configuration. Take a look at frdc.conf.BAND_MAX_CONFIG for an example.

Usage

from frdc.load.preset import FRDCDatasetPreset
from frdc.preprocess.scale import (
    scale_0_1_per_band, scale_normal_per_band, scale_static_per_band
)
from frdc.conf import BAND_MAX_CONFIG

ds = FRDCDatasetPreset.chestnut_20201218()
ar, order = ds._get_ar_bands()

ar_01 = scale_0_1_per_band(ar)
ar_norm = scale_normal_per_band(ar)
ar_static = scale_static_per_band(ar, order, BAND_MAX_CONFIG)
Last modified: 26 June 2024

Retrieve our Datasets

In this tutorial, we'll learn how to:

  • Retrieve FRDC's datasets

  • Inspect the data

  • Integrate it with PyTorch's DataLoader

  • Visualize the data

Prerequisites

  • New here? Get Started.

  • Set up the Google Cloud Authorization to download the data.

Retrieve the Data

To retrieve the data, use FRDCDatasetPreset. This module provides presets that load explicitly known datasets.

For example:

from frdc.load.preset import FRDCDatasetPreset

ds = FRDCDatasetPreset.chestnut_20201218()
for x, y in ds:
    print(x.shape, y)

You should get something like this:

(831, 700, 8) Falcataria Moluccana
(540, 536, 8) Ficus Variegata
(457, 660, 8) Bridelia Sp.
...
  • x is a torch.Tensor

  • y is a str.

Iterate through the Data

The dataset, when you load it, will be automatically segmented by bounds. Therefore, if you want to simply loop through the segments and labels, you can treat the dataset as an iterable.

from frdc.load.preset import FRDCDatasetPreset

ds = FRDCDatasetPreset.chestnut_20201218()
for x, y in ds:
    print(x.shape, y)

If you just want the segments or targets separately, use .ar_segments and .targets respectively.

from frdc.load.preset import FRDCDatasetPreset

ds = FRDCDatasetPreset.chestnut_20201218()

for x in ds.ar_segments:
    print(x.shape)

for y in ds.targets:
    print(y)

If you want the entire image, use .ar.

from frdc.load.preset import FRDCDatasetPreset

ds = FRDCDatasetPreset.chestnut_20201218()
ar = ds.ar

Finally, inspect the order of the bands through the band_order attribute.

from frdc.load.preset import FRDCDatasetPreset

ds = FRDCDatasetPreset.chestnut_20201218()
ds.band_order
> ['WB', 'WG', 'WR', 'NB', 'NG', 'NR', 'RE', 'NIR']

Using with PyTorch's DataLoader

Every FRDCDataset is a Dataset object, so you can use it with PyTorch's DataLoader. This allows you to retrieve by batches!

from torch.utils.data import DataLoader
from torchvision.transforms.v2 import CenterCrop, Compose, Resize, ToImage

from frdc.load.preset import FRDCDatasetPreset

ds = FRDCDatasetPreset.chestnut_20201218(
    transform=Compose([ToImage(), Resize(100), CenterCrop(100)])
)
dl = DataLoader(ds, batch_size=4, shuffle=True)

for x, y in dl:
    print(x.shape, y)

Which should output

torch.Size([4, 8, 100, 100]) ('Falcataria Moluccana', ...)
torch.Size([4, 8, 100, 100]) ('Clausena Excavata', ...)
torch.Size([4, 8, 100, 100]) ('Clausena Excavata', ...)
...

Plot the Data (Optional)

We can then use these data to plot out the first tree segment.

import matplotlib.pyplot as plt

from frdc.load.preset import FRDCDatasetPreset
from frdc.preprocess.scale import scale_0_1_per_band

ds = FRDCDatasetPreset.chestnut_20201218()
segment_0_bgr = ds.ar_segments[0]
segment_0_rgb = segment_0_bgr[..., [2, 1, 0]]
segment_0_rgb_scaled = scale_0_1_per_band(segment_0_rgb)

plt.imshow(segment_0_rgb_scaled)
plt.title(f"Tree {ds.targets[0]}")
plt.show()

See also: preprocessing.scale.scale_0_1_per_band

MatPlotLib cannot show the data correctly as-is, so we need to:

  • Convert the data from BGR to RGB

  • Scale the data to 0-1 per band

Last modified: 26 June 2024
diff --git a/src/frdc/conf.py b/src/frdc/conf.py
index 5d566b4e..84e153eb 100644
--- a/src/frdc/conf.py
+++ b/src/frdc/conf.py
@@ -29,13 +29,12 @@
     ENV_EXAMPLE_FILE = ROOT_DIR / ".env.example"
     if ENV_EXAMPLE_FILE.exists():
         shutil.copy(ENV_EXAMPLE_FILE, ENV_FILE)
-        raise FileNotFoundError(
+        logger.warning(
             f"Environment file not found at {ENV_FILE.as_posix()}. "
             "A new one has been created from the .env.example file.\n"
-            "Set the necessary variables and re-run the script."
         )
     else:
-        raise FileNotFoundError(
+        logger.warning(
             f"Environment file not found at {ENV_FILE.as_posix()}. "
             "Please create one or copy the .env.example file in the GitHub "
             "repository."