From ddc7e1c8b38fea5850a07ea6aaef0a6571e83b6e Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 14:48:01 +0800 Subject: [PATCH 01/18] Implement Preset Class This class will help reduce errors when getting datasets IDE autocomplete prompts --- src/frdc/load/dataset.py | 97 +++++++++++++++++++++++++++------------- 1 file changed, 67 insertions(+), 30 deletions(-) diff --git a/src/frdc/load/dataset.py b/src/frdc/load/dataset.py index b3654cca..34099fc2 100644 --- a/src/frdc/load/dataset.py +++ b/src/frdc/load/dataset.py @@ -4,7 +4,7 @@ from collections import OrderedDict from dataclasses import dataclass from pathlib import Path -from typing import Iterable, Callable, Any +from typing import Iterable, Callable, Any, Protocol import numpy as np import pandas as pd @@ -40,6 +40,10 @@ def __init__( ): """Initializes the FRDC Dataset. + Notes: + We recommend to check FRDCDatasetPreset if you want to use a + pre-defined dataset. + Args: site: The site of the dataset, e.g. "chestnut_nature_park". date: The date of the dataset, e.g. "20201218". @@ -83,35 +87,6 @@ def __getitem__(self, idx): else self.targets[idx], ) - @staticmethod - def _load_debug_dataset(resize: int = 299) -> FRDCDataset: - """Loads a debug dataset from Google Cloud Storage. - - Returns: - A dictionary of the dataset, with keys as the filenames and values - as the images. - """ - from torchvision.transforms.v2 import ( - Compose, - ToImage, - ToDtype, - Resize, - ) - - return FRDCDataset( - site="DEBUG", - date="0", - version=None, - transform=Compose( - [ - ToImage(), - ToDtype(torch.float32), - Resize((resize, resize)), - ] - ), - target_transform=None, - ) - @property def dataset_dir(self): return Path( @@ -247,6 +222,68 @@ def _load_image(path: Path | str) -> np.ndarray: return np.expand_dims(ar, axis=-1) if ar.ndim == 2 else ar +class FRDCDatasetPartial(Protocol): + """This class is used to provide type hints for FRDCDatasetPreset.""" + + def __call__( + self, + transform: Callable[[list[np.ndarray]], Any] = None, + target_transform: Callable[[list[str]], list[str]] = None, + use_legacy_bounds: bool = False, + ): + ... + + +# This curries the FRDCDataset class, so that we can shorthand the preset +# definitions. +def dataset(site: str, date: str, version: str | None) -> FRDCDatasetPartial: + def inner( + transform: Callable[[list[np.ndarray]], Any] = None, + target_transform: Callable[[list[str]], list[str]] = None, + use_legacy_bounds: bool = False, + ): + return FRDCDataset( + site, date, version, transform, target_transform, use_legacy_bounds + ) + + return inner + + +from torchvision.transforms.v2 import ( + Compose, + ToImage, + ToDtype, + Resize, +) + + +@dataclass +class FRDCDatasetPreset: + chestnut_20201218 = dataset("chestnut_nature_park", "20201218", None) + chestnut_20210510_43m = dataset( + "chestnut_nature_park", "20210510", "90deg43m85pct255deg" + ) + chestnut_20210510_60m = dataset( + "chestnut_nature_park", "20210510", "90deg60m84.5pct255deg" + ) + casuarina_20220418_183deg = dataset( + "casuarina_nature_park", "20220418", "183deg" + ) + casuarina_20220418_93deg = dataset( + "casuarina_nature_park", "20220418", "93deg" + ) + DEBUG = lambda resize=299: dataset(site="DEBUG", date="0", version=None)( + transform=Compose( + [ + ToImage(), + ToDtype(torch.float32), + Resize((resize, resize)), + ] + ), + target_transform=None, + ) + + # TODO: Kind of hacky, the unlabelled dataset should somehow come from the # labelled dataset by filtering out the unknown labels. 
But we'll # figure out this later when we do get unlabelled data. From 76b4dff776745a84badcb3fd41ebfb33ca472fc3 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 14:48:19 +0800 Subject: [PATCH 02/18] Update debug dataset loading --- tests/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 1e9d84bd..d420f691 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,11 +2,12 @@ import pytest from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCDatasetPreset @pytest.fixture(scope="session") def ds() -> FRDCDataset: - return FRDCDataset._load_debug_dataset() + return FRDCDatasetPreset.DEBUG() @pytest.fixture(scope="session") From 2fbd4d410b50a99ccf707d6be6dbc59dbf16bca8 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 14:48:44 +0800 Subject: [PATCH 03/18] Update preset loading for chestnut training --- tests/model_tests/chestnut_dec_may/train.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/tests/model_tests/chestnut_dec_may/train.py b/tests/model_tests/chestnut_dec_may/train.py index f31ee825..64dc7c8a 100644 --- a/tests/model_tests/chestnut_dec_may/train.py +++ b/tests/model_tests/chestnut_dec_may/train.py @@ -22,7 +22,7 @@ from sklearn.preprocessing import StandardScaler, OrdinalEncoder from frdc.load import FRDCDataset -from frdc.load.dataset import FRDCUnlabelledDataset +from frdc.load.dataset import FRDCUnlabelledDataset, FRDCDatasetPreset from frdc.models.inceptionv3 import InceptionV3MixMatchModule from frdc.train.frdc_datamodule import FRDCDataModule from model_tests.utils import ( @@ -44,11 +44,8 @@ def main( run = wandb.init() logger = WandbLogger(name="chestnut_dec_may", project="frdc") # Prepare the dataset - train_lab_ds = FRDCDataset( - "chestnut_nature_park", - "20201218", - None, - transform=train_preprocess, + train_lab_ds = FRDCDatasetPreset.chestnut_20201218( + transform=train_preprocess ) # TODO: This is a hacky impl of the unlabelled dataset, see the docstring @@ -60,13 +57,7 @@ def main( transform=train_unl_preprocess(2), ) - # Subset(train_ds, np.argwhere(train_ds.targets == 0).reshape(-1)) - val_ds = FRDCDataset( - "chestnut_nature_park", - "20210510", - "90deg43m85pct255deg", - transform=preprocess, - ) + val_ds = FRDCDatasetPreset.chestnut_20210510_43m(transform=preprocess) oe = OrdinalEncoder( handle_unknown="use_encoded_value", From b5a465a976e0415a7803c14561719689e9c35b4d Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 14:51:44 +0800 Subject: [PATCH 04/18] Move import to top --- src/frdc/load/dataset.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/frdc/load/dataset.py b/src/frdc/load/dataset.py index 34099fc2..54b9c918 100644 --- a/src/frdc/load/dataset.py +++ b/src/frdc/load/dataset.py @@ -11,6 +11,12 @@ import torch from PIL import Image from torch.utils.data import Dataset, ConcatDataset +from torchvision.transforms.v2 import ( + Compose, + ToImage, + ToDtype, + Resize, +) from frdc.conf import ( BAND_CONFIG, @@ -249,14 +255,6 @@ def inner( return inner -from torchvision.transforms.v2 import ( - Compose, - ToImage, - ToDtype, - Resize, -) - - @dataclass class FRDCDatasetPreset: chestnut_20201218 = dataset("chestnut_nature_park", "20201218", None) From 334daa7c50fb0ae410f1780f9ef0884369f94eb7 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 15:32:58 +0800 Subject: [PATCH 05/18] Implement interface to use add op to concat --- 
src/frdc/load/dataset.py | 33 ++++++++++++---------- tests/unit_tests/load/test_frdc_dataset.py | 12 ++++++++ 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/src/frdc/load/dataset.py b/src/frdc/load/dataset.py index 54b9c918..578f913c 100644 --- a/src/frdc/load/dataset.py +++ b/src/frdc/load/dataset.py @@ -33,6 +33,20 @@ logger = logging.getLogger(__name__) +# This is not yet used much as we don't have sufficient training data. +class FRDCConcatDataset(ConcatDataset): + def __init__(self, datasets: list[FRDCDataset]): + super().__init__(datasets) + self.datasets: list[FRDCDataset] = datasets + + @property + def targets(self): + return [t for ds in self.datasets for t in ds.targets] + + def __add__(self, other: FRDCDataset) -> FRDCConcatDataset: + return FRDCConcatDataset([*self.datasets, other]) + + @dataclass class FRDCDataset(Dataset): def __init__( @@ -63,6 +77,7 @@ def __init__( self.version = version self.ar, self.order = self.get_ar_bands() + self.targets = None if use_legacy_bounds or (LABEL_STUDIO_CLIENT is None): logger.warning( @@ -227,6 +242,9 @@ def _load_image(path: Path | str) -> np.ndarray: ar = np.asarray(im) return np.expand_dims(ar, axis=-1) if ar.ndim == 2 else ar + def __add__(self, other) -> FRDCConcatDataset: + return FRDCConcatDataset([self, other]) + class FRDCDatasetPartial(Protocol): """This class is used to provide type hints for FRDCDatasetPreset.""" @@ -296,18 +314,3 @@ def __getitem__(self, item): if self.transform else self.ar_segments[item] ) - - -# This is not yet used much as we don't have sufficient training data. -class FRDCConcatDataset(ConcatDataset): - def __init__(self, datasets: list[FRDCDataset]): - super().__init__(datasets) - self.datasets = datasets - - def __getitem__(self, idx): - x, y = super().__getitem__(idx) - return x, y - - @property - def targets(self): - return [t for ds in self.datasets for t in ds.targets] diff --git a/tests/unit_tests/load/test_frdc_dataset.py b/tests/unit_tests/load/test_frdc_dataset.py index c0e2c838..0a75425c 100644 --- a/tests/unit_tests/load/test_frdc_dataset.py +++ b/tests/unit_tests/load/test_frdc_dataset.py @@ -1,4 +1,5 @@ from frdc.conf import BAND_CONFIG +from frdc.load.dataset import FRDCConcatDataset from frdc.utils import Rect @@ -23,3 +24,14 @@ def test_get_bounds(ds): bounds, labels = ds.get_bounds_and_labels() assert all([isinstance(b, Rect) for b in bounds]) assert len(bounds) == len(labels) + + +def test_ds_add_ds_creates_concat_ds(ds): + assert isinstance(ds + ds, FRDCConcatDataset) + assert len(ds + ds) == len(ds) * 2 + + +def test_concat_ds_add_ds_creates_concat_ds(ds): + cds = ds + ds + assert isinstance(cds + ds, FRDCConcatDataset) + assert len(cds + ds) == len(ds) * 3 From ef57cf4b35d118253e92efb363697cdaebb3b7d4 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 15:34:03 +0800 Subject: [PATCH 06/18] Remove unused import --- tests/model_tests/chestnut_dec_may/train.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/model_tests/chestnut_dec_may/train.py b/tests/model_tests/chestnut_dec_may/train.py index 64dc7c8a..35999543 100644 --- a/tests/model_tests/chestnut_dec_may/train.py +++ b/tests/model_tests/chestnut_dec_may/train.py @@ -21,7 +21,6 @@ from lightning.pytorch.loggers import WandbLogger from sklearn.preprocessing import StandardScaler, OrdinalEncoder -from frdc.load import FRDCDataset from frdc.load.dataset import FRDCUnlabelledDataset, FRDCDatasetPreset from frdc.models.inceptionv3 import InceptionV3MixMatchModule from frdc.train.frdc_datamodule 
import FRDCDataModule From f04b9a3882c579186163855a8ff2be70322bfd71 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 15:50:11 +0800 Subject: [PATCH 07/18] Move warning to func def --- src/frdc/load/dataset.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/frdc/load/dataset.py b/src/frdc/load/dataset.py index 578f913c..06fa4f97 100644 --- a/src/frdc/load/dataset.py +++ b/src/frdc/load/dataset.py @@ -80,11 +80,6 @@ def __init__( self.targets = None if use_legacy_bounds or (LABEL_STUDIO_CLIENT is None): - logger.warning( - "Using legacy bounds.csv file for dataset." - "This is pending to be deprecated in favour of pulling " - "annotations from Label Studio." - ) bounds, self.targets = self.get_bounds_and_labels() self.ar_segments = extract_segments_from_bounds(self.ar, bounds) else: @@ -211,6 +206,11 @@ def get_bounds_and_labels( A tuple of (bounds, labels), where bounds is a list of (x0, y0, x1, y1) and labels is a list of labels. """ + logger.warning( + "Using legacy bounds.csv file for dataset." + "This is pending to be deprecated in favour of pulling " + "annotations from Label Studio." + ) fp = download(fp=self.dataset_dir / file_name) df = pd.read_csv(fp) return ( From 419fd9ef54552dcec457251ac060f5605a3914e4 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 17:23:52 +0800 Subject: [PATCH 08/18] Improve syntax of creating unlabelled datasets --- src/frdc/load/dataset.py | 199 ++++++++++++++++++++++++++++++++------- 1 file changed, 165 insertions(+), 34 deletions(-) diff --git a/src/frdc/load/dataset.py b/src/frdc/load/dataset.py index 06fa4f97..258aac9c 100644 --- a/src/frdc/load/dataset.py +++ b/src/frdc/load/dataset.py @@ -4,7 +4,7 @@ from collections import OrderedDict from dataclasses import dataclass from pathlib import Path -from typing import Iterable, Callable, Any, Protocol +from typing import Iterable, Callable, Any import numpy as np import pandas as pd @@ -33,8 +33,32 @@ logger = logging.getLogger(__name__) -# This is not yet used much as we don't have sufficient training data. class FRDCConcatDataset(ConcatDataset): + """ConcatDataset for FRDCDataset. + + Notes: + This handles concatenating the targets when you add two datasets + together, furthermore, implements the addition operator to + simplify the syntax. + + Examples: + If you have two datasets, ds1 and ds2, you can concatenate them:: + + ds = ds1 + ds2 + + `ds` will be a FRDCConcatDataset, which is a subclass of ConcatDataset. + + You can further add to a concatenated dataset:: + + ds = ds1 + ds2 + ds = ds + ds3 + + Finallu, all concatenated datasets have the `targets` property, which + is a list of all the targets in the datasets:: + + (ds1 + ds2).targets == ds1.targets + ds2.targets + """ + def __init__(self, datasets: list[FRDCDataset]): super().__init__(datasets) self.datasets: list[FRDCDataset] = datasets @@ -64,6 +88,13 @@ def __init__( We recommend to check FRDCDatasetPreset if you want to use a pre-defined dataset. + You can concatenate datasets using the addition operator, e.g.:: + + ds = FRDCDataset(...) + FRDCDataset(...) + + This will return a FRDCConcatDataset, see FRDCConcatDataset for + more information. + Args: site: The site of the dataset, e.g. "chestnut_nature_park". date: The date of the dataset, e.g. "20201218". @@ -71,6 +102,9 @@ def __init__( transform: The transform to apply to each segment. target_transform: The transform to apply to each label. use_legacy_bounds: Whether to use the legacy bounds.csv file. 
+ This will automatically be set to True if LABEL_STUDIO_CLIENT + is None, which happens when Label Studio cannot be connected + to. """ self.site = site self.date = date @@ -105,6 +139,7 @@ def __getitem__(self, idx): @property def dataset_dir(self): + """Returns the path format of the dataset.""" return Path( f"{self.site}/{self.date}/" f"{self.version + '/' if self.version else ''}" @@ -219,6 +254,7 @@ def get_bounds_and_labels( ) def get_polybounds_and_labels(self): + """Gets the bounds and labels from Label Studio.""" return get_task( Path(f"{self.dataset_dir}/result.jpg") ).get_bounds_and_labels() @@ -246,8 +282,32 @@ def __add__(self, other) -> FRDCConcatDataset: return FRDCConcatDataset([self, other]) -class FRDCDatasetPartial(Protocol): - """This class is used to provide type hints for FRDCDatasetPreset.""" +# This curries the FRDCDataset class, so that we can shorthand the preset +# definitions. +@dataclass +class FRDCDatasetPartial: + """Partial class for FRDCDataset. + + Notes: + This is used internally by FRDCDatasetPreset to define the presets + in a more concise manner:: + + # Instead of + lambda *args, **kwargs: + FRDCDataset("chestnut_nature_park", "20201218", None, + *args, **kwargs) + + # Using partial, we can do this instead + FRDCDatasetPartial("chestnut_nature_park", "20201218", None)( + *args, **kwargs + ) + + See FRDCDatasetPreset for usage. + """ + + site: str + date: str + version: str | None def __call__( self, @@ -255,40 +315,127 @@ def __call__( target_transform: Callable[[list[str]], list[str]] = None, use_legacy_bounds: bool = False, ): - ... - + """Alias for labelled().""" + return self.labelled( + transform, + target_transform, + use_legacy_bounds, + ) -# This curries the FRDCDataset class, so that we can shorthand the preset -# definitions. -def dataset(site: str, date: str, version: str | None) -> FRDCDatasetPartial: - def inner( + def labelled( + self, transform: Callable[[list[np.ndarray]], Any] = None, target_transform: Callable[[list[str]], list[str]] = None, use_legacy_bounds: bool = False, ): + """Returns the Labelled Dataset.""" return FRDCDataset( - site, date, version, transform, target_transform, use_legacy_bounds + self.site, + self.date, + self.version, + transform, + target_transform, + use_legacy_bounds, + ) + + def unlabelled( + self, + transform: Callable[[list[np.ndarray]], Any] = None, + target_transform: Callable[[list[str]], list[str]] = None, + use_legacy_bounds: bool = False, + ): + """Returns the Unlabelled Dataset. + + Notes: + This simply masks away the labels during __getitem__. + The same behaviour can be achieved by setting __class__ to + FRDCUnlabelledDataset, but this is a more convenient way to do so. + """ + return FRDCUnlabelledDataset( + self.site, + self.date, + self.version, + transform, + target_transform, + use_legacy_bounds, ) - return inner + +class FRDCUnlabelledDataset(FRDCDataset): + """An implementation of FRDCDataset that masks away the labels. + + Notes: + If you already have a FRDCDataset, you can simply set __class__ to + FRDCUnlabelledDataset to achieve the same behaviour:: + + ds.__class__ = FRDCUnlabelledDataset + + This will replace the __getitem__ method with the one below. + + However, it's also perfectly fine to initialize this directly:: + + ds_unl = FRDCUnlabelledDataset(...) 
+ """ + + def __getitem__(self, item): + return ( + self.transform(self.ar_segments[item]) + if self.transform + else self.ar_segments[item] + ) @dataclass class FRDCDatasetPreset: - chestnut_20201218 = dataset("chestnut_nature_park", "20201218", None) - chestnut_20210510_43m = dataset( + """Presets for the FRDCDataset. + + Examples: + Each variable is a preset for the FRDCDataset. + + You can use it like this:: + + FRDCDatasetPreset.chestnut_20201218() + + Which returns a FRDCDataset. + + Furthermore, if you're interested in the unlabelled dataset, you can + use:: + + FRDCDatasetPreset.chestnut_20201218.unlabelled() + + Which returns a FRDCUnlabelledDataset. + + If you'd like to keep the syntax consistent for labelled and unlabelled + datasets, you can use:: + + FRDCDatasetPreset.chestnut_20201218.labelled() + FRDCDatasetPreset.chestnut_20201218.unlabelled() + + The `labelled` method is simply an alias for the `__call__` method. + + The DEBUG dataset is a special dataset that is used for debugging, + which pulls from GCS a small cropped image and dummy label + bounds. + + """ + + chestnut_20201218 = FRDCDatasetPartial( + "chestnut_nature_park", "20201218", None + ) + chestnut_20210510_43m = FRDCDatasetPartial( "chestnut_nature_park", "20210510", "90deg43m85pct255deg" ) - chestnut_20210510_60m = dataset( + chestnut_20210510_60m = FRDCDatasetPartial( "chestnut_nature_park", "20210510", "90deg60m84.5pct255deg" ) - casuarina_20220418_183deg = dataset( + casuarina_20220418_183deg = FRDCDatasetPartial( "casuarina_nature_park", "20220418", "183deg" ) - casuarina_20220418_93deg = dataset( + casuarina_20220418_93deg = FRDCDatasetPartial( "casuarina_nature_park", "20220418", "93deg" ) - DEBUG = lambda resize=299: dataset(site="DEBUG", date="0", version=None)( + DEBUG = lambda resize=299: FRDCDatasetPartial( + site="DEBUG", date="0", version=None + )( transform=Compose( [ ToImage(), @@ -298,19 +445,3 @@ class FRDCDatasetPreset: ), target_transform=None, ) - - -# TODO: Kind of hacky, the unlabelled dataset should somehow come from the -# labelled dataset by filtering out the unknown labels. But we'll -# figure out this later when we do get unlabelled data. -# I'm thinking some API that's like -# FRDCDataset.filter_labels(...) -> FRDCSubset, FRDCSubset -# It could be more intuitive if it returns FRDCDataset, so we don't have -# to implement another class. 
-class FRDCUnlabelledDataset(FRDCDataset): - def __getitem__(self, item): - return ( - self.transform(self.ar_segments[item]) - if self.transform - else self.ar_segments[item] - ) From 7d3183e86aeb790fb00c6617d7a01bfb2304b033 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 17:24:49 +0800 Subject: [PATCH 09/18] Implement auto casting of labelled to unlabelled This provides a failsafe interface if somehow someone forgot to use the unlabelled set, which is totally fine --- src/frdc/train/frdc_datamodule.py | 45 +++++++++++++++++++------------ 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/src/frdc/train/frdc_datamodule.py b/src/frdc/train/frdc_datamodule.py index 6138c7e5..97daaa6b 100644 --- a/src/frdc/train/frdc_datamodule.py +++ b/src/frdc/train/frdc_datamodule.py @@ -1,11 +1,14 @@ from __future__ import annotations from dataclasses import dataclass +from types import MethodType +from typing import Any from lightning import LightningDataModule from torch.utils.data import DataLoader, RandomSampler from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCUnlabelledDataset @dataclass @@ -14,34 +17,39 @@ class FRDCDataModule(LightningDataModule): Notes: This is a special datamodule for semi-supervised learning, which - requires two dataloaders for the labelled and unlabelled datasets. - It can also be used for supervised learning, by passing in None for - the unlabelled dataset. + can accept an optional dataloaders for an unlabelled dataset. + + Without an unsupervised dataset it can be used for supervised learning, + by passing in None for the unlabelled dataset. If you're using our MixMatch Module, using None for the unlabelled dataset will skip the MixMatch. However, note that this is not equivalent to passing the Labelled set as unlabelled as well. - For example: - >>> FRDCSSLDataModule( - ... train_lab_ds=train_lab_ds, - ... train_unl_ds=train_lab_ds, - ... ... - ... ) + For example:: + + FRDCDataModule( + train_lab_ds=train_lab_ds, + train_unl_ds=train_lab_ds, + ... + ) + + Does not have the same performance as:: - Does not have the same performance as: - >>> FRDCSSLDataModule( - ... train_lab_ds=train_lab_ds, - ... train_unl_ds=None, - ... ... - ... ) + FRDCSSLDataModule( + train_lab_ds=train_lab_ds, + train_unl_ds=None, + ... + ) As partially, some samples in MixMatch uses the unlabelled loss. Args: train_lab_ds: The labelled training dataset. train_unl_ds: The unlabelled training dataset. Can be None, which will - default to a DataModule suitable for supervised learning. + default to a DataModule suitable for supervised learning. If + train_unl_ds is a FRDCDataset, it will be converted to a + FRDCUnlabelledDataset, which simply masks away the labels. val_ds: The validation dataset. batch_size: The batch size to use for the dataloaders. 
train_iters: The number of iterations to run for the labelled training @@ -52,7 +60,7 @@ class FRDCDataModule(LightningDataModule): train_lab_ds: FRDCDataset val_ds: FRDCDataset - train_unl_ds: FRDCDataset | None = None + train_unl_ds: FRDCDataset | FRDCUnlabelledDataset | None = None batch_size: int = 4 train_iters: int = 100 val_iters: int = 100 @@ -60,6 +68,9 @@ class FRDCDataModule(LightningDataModule): def __post_init__(self): super().__init__() + if isinstance(self.train_unl_ds, FRDCDataset): + self.train_unl_ds.__class__ = FRDCUnlabelledDataset + def train_dataloader(self): num_samples = self.batch_size * self.train_iters lab_dl = DataLoader( From a34d3677da69fe81d44c30d6ce3bb71a45096b3e Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 17:25:29 +0800 Subject: [PATCH 10/18] Refactor unlabelled to use the preset --- tests/model_tests/chestnut_dec_may/train.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tests/model_tests/chestnut_dec_may/train.py b/tests/model_tests/chestnut_dec_may/train.py index 35999543..53099ece 100644 --- a/tests/model_tests/chestnut_dec_may/train.py +++ b/tests/model_tests/chestnut_dec_may/train.py @@ -47,12 +47,7 @@ def main( transform=train_preprocess ) - # TODO: This is a hacky impl of the unlabelled dataset, see the docstring - # for future work. - train_unl_ds = FRDCUnlabelledDataset( - "chestnut_nature_park", - "20201218", - None, + train_unl_ds = FRDCDatasetPreset.chestnut_20201218.unlabelled( transform=train_unl_preprocess(2), ) From 58c977dc86ca02ea48fdc8075c9cf16ec4e86144 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 17:25:45 +0800 Subject: [PATCH 11/18] Refactor the preprocessing step --- src/frdc/train/mixmatch_module.py | 87 ++++++++++++++----------------- 1 file changed, 38 insertions(+), 49 deletions(-) diff --git a/src/frdc/train/mixmatch_module.py b/src/frdc/train/mixmatch_module.py index 194928ad..784380b6 100644 --- a/src/frdc/train/mixmatch_module.py +++ b/src/frdc/train/mixmatch_module.py @@ -241,72 +241,61 @@ def on_before_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any: We leverage this to do some preprocessing on the data. Namely, we use the StandardScaler and OrdinalEncoder to transform the data. - """ - - # TODO: ngl, this is pretty chunky. - # It works, but it's not very pretty. - if self.training: - (x_lab, y), x_unl = batch - xs = [x_lab, *x_unl] - - b, c, h, w = x_lab.shape - - # Move Channel to the last dimension then transform - xs_ss: list[np.ndarray] = [ - self.x_scaler.transform(x.permute(0, 2, 3, 1).reshape(-1, c)) - for x in xs - ] - - # Move Channel back to the second dimension - xs_: list[torch.Tensor] = [ - torch.from_numpy(x_ss.reshape(b, h, w, c)) - .permute(0, 3, 1, 2) - .float() - for x_ss in xs_ss - ] - - y: tuple[str] - y_: torch.Tensor = torch.from_numpy( - self.y_encoder.transform(np.array(y).reshape(-1, 1)).squeeze() - ) - - # Ordinal Encoders can return a np.nan if the value is not in the - # categories. We will remove that from the batch. - x_ = xs_[0][~torch.isnan(y_)] - y_ = y_[~torch.isnan(y_)] - return (x_, y_.long()), xs_[1:] - - else: - x, y = batch - - x: torch.Tensor - b, c, h, w = x.shape + Notes: + PyTorch Lightning may complain about this being on the Module + instead of the DataModule. However, this is intentional as we + want to export the model alongside the transformations. + """ + def x_trans_fn(x): # Standard Scaler only accepts (n_samples, n_features), # so we need to do some fancy reshaping. 
# Note that moving dimensions then reshaping is different from just # reshaping! + # Move Channel to the last dimension then transform - x_ss: np.ndarray = self.x_scaler.transform( + # B x C x H x W -> B x H x W x C + b, c, h, w = x.shape + x_ss = self.x_scaler.transform( x.permute(0, 2, 3, 1).reshape(-1, c) ) # Move Channel back to the second dimension - x_: torch.Tensor = ( + # B x H x W x C -> B x C x H x W + return ( torch.from_numpy(x_ss.reshape(b, h, w, c)) .permute(0, 3, 1, 2) .float() ) - y: tuple[str] - y_: torch.Tensor = torch.from_numpy( + def y_trans_fn(y): + return torch.from_numpy( self.y_encoder.transform(np.array(y).reshape(-1, 1)).squeeze() ) - # Ordinal Encoders can return a np.nan if the value is not in the - # categories. We will remove that from the batch. - x_ = x_[~torch.isnan(y_)] - y_ = y_[~torch.isnan(y_)] + # We need to handle the train and val dataloaders differently. + # For training, the unlabelled data is returned while for validation, + # the unlabelled data is just omitted. + if self.training: + (x_lab, y), x_unl = batch + else: + x_lab, y = batch + x_unl = [] + + x_lab_trans = x_trans_fn(x_lab) + y_trans = y_trans_fn(y) + x_unl_trans = [x_trans_fn(x) for x in x_unl] - return x_, y_.long() + # Remove nan values from the batch + # Ordinal Encoders can return a np.nan if the value is not in the + # categories. We will remove that from the batch. + nan = ~torch.isnan(y_trans) + x_lab_trans = x_lab_trans[nan] + x_unl_trans = [x[nan] for x in x_unl_trans] + y_trans = y_trans[nan] + + if self.training: + return (x_lab_trans, y_trans.long()), x_unl_trans + else: + return x_lab_trans, y_trans.long() From 2928147e91b694ad6fc60a9c26497daa09ebd589 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 18:02:37 +0800 Subject: [PATCH 12/18] Move common scripts to utils --- src/frdc/utils/training.py | 67 ++++++++++++++++++++++++++++++++++++++ tests/model_tests/utils.py | 50 ++-------------------------- 2 files changed, 69 insertions(+), 48 deletions(-) create mode 100644 src/frdc/utils/training.py diff --git a/src/frdc/utils/training.py b/src/frdc/utils/training.py new file mode 100644 index 00000000..87593f89 --- /dev/null +++ b/src/frdc/utils/training.py @@ -0,0 +1,67 @@ +from __future__ import annotations +from pathlib import Path + +import lightning as pl +import numpy as np +from matplotlib import pyplot as plt +from seaborn import heatmap +from sklearn.metrics import confusion_matrix +from torch.utils.data import DataLoader + +from frdc.load import FRDCDataset + + +def get_latest_ckpt_path(search_dir: Path, extention: str = "ckpt"): + # This fetches all possible checkpoints and gets the latest one + return sorted( + search_dir.glob(f"**/*.{extention}"), + key=lambda x: x.stat().st_mtime_ns, + )[-1] + + +def plot_confusion_matrix( + y_trues, y_preds, labels +) -> tuple[plt.Figure, plt.Axes]: + # Plot the confusion matrix + cm = confusion_matrix(y_trues, y_preds) + + fig, ax = plt.subplots(figsize=(10, 10)) + + heatmap( + cm, + annot=True, + xticklabels=labels, + yticklabels=labels, + cbar=False, + ax=ax, + ) + + fig.tight_layout(pad=3) + ax.set_xlabel("Predicted Label") + ax.set_ylabel("True Label") + + return fig, ax + + +def predict( + ds: FRDCDataset, + model_cls: type[pl.LightningModule], + ckpt_pth: Path | str | None = None, +) -> tuple[np.ndarray, np.ndarray]: + m = model_cls.load_from_checkpoint(ckpt_pth) + # Make predictions + trainer = pl.Trainer(logger=False) + pred = trainer.predict(m, dataloaders=DataLoader(ds, batch_size=32)) + + y_preds = [] + 
y_trues = [] + for y_true, y_pred in pred: + y_preds.append(y_pred) + y_trues.append(y_true) + y_trues = np.concatenate(y_trues) + y_preds = np.concatenate(y_preds) + return y_trues, y_preds + + +def accuracy(y_trues, y_preds) -> float: + return (y_trues == y_preds).mean() diff --git a/tests/model_tests/utils.py b/tests/model_tests/utils.py index 593bae11..f578a87e 100644 --- a/tests/model_tests/utils.py +++ b/tests/model_tests/utils.py @@ -1,12 +1,8 @@ +from __future__ import annotations + from pathlib import Path -import lightning as pl -import numpy as np import torch -from matplotlib import pyplot as plt -from seaborn import heatmap -from sklearn.metrics import confusion_matrix -from torch.utils.data import DataLoader from torchvision.transforms import RandomVerticalFlip from torchvision.transforms.v2 import ( Compose, @@ -49,48 +45,6 @@ def __getitem__(self, idx): return RandomHorizontalFlip(p=1)(RandomVerticalFlip(p=1)(x)), y -def evaluate( - ds: FRDCDataset, ckpt_pth: Path | str | None = None -) -> tuple[plt.Figure, float]: - if ckpt_pth is None: - # This fetches all possible checkpoints and gets the latest one - ckpt_pth = sorted( - THIS_DIR.glob("**/*.ckpt"), key=lambda x: x.stat().st_mtime_ns - )[-1] - - m = InceptionV3MixMatchModule.load_from_checkpoint(ckpt_pth) - # Make predictions - trainer = pl.Trainer(logger=False) - pred = trainer.predict(m, dataloaders=DataLoader(ds, batch_size=32)) - - y_trues = [] - y_preds = [] - for y_true, y_pred in pred: - y_trues.append(y_true) - y_preds.append(y_pred) - y_trues = np.concatenate(y_trues) - y_preds = np.concatenate(y_preds) - acc = (y_trues == y_preds).mean() - - # Plot the confusion matrix - cm = confusion_matrix(y_trues, y_preds) - - plt.figure(figsize=(10, 10)) - - heatmap( - cm, - annot=True, - xticklabels=m.y_encoder.categories_[0], - yticklabels=m.y_encoder.categories_[0], - cbar=False, - ) - plt.title(f"Accuracy: {acc:.2%}") - plt.tight_layout(pad=3) - plt.xlabel("Predicted Label") - plt.ylabel("True Label") - return plt.gcf(), acc - - def preprocess(x): return Compose( [ From 60b835d335b37256d4a82d3a71aeae19112b9614 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 18:02:49 +0800 Subject: [PATCH 13/18] Migrate references for train --- tests/model_tests/chestnut_dec_may/train.py | 27 ++++++++++++--------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/tests/model_tests/chestnut_dec_may/train.py b/tests/model_tests/chestnut_dec_may/train.py index 53099ece..97123f02 100644 --- a/tests/model_tests/chestnut_dec_may/train.py +++ b/tests/model_tests/chestnut_dec_may/train.py @@ -5,8 +5,11 @@ """ # Uncomment this to run the W&B monitoring locally -# import os -# os.environ["WANDB_MODE"] = "offline" +import os + +from frdc.utils.training import predict, plot_confusion_matrix + +os.environ["WANDB_MODE"] = "offline" from pathlib import Path @@ -21,14 +24,13 @@ from lightning.pytorch.loggers import WandbLogger from sklearn.preprocessing import StandardScaler, OrdinalEncoder -from frdc.load.dataset import FRDCUnlabelledDataset, FRDCDatasetPreset +from frdc.load.dataset import FRDCDatasetPreset as ds from frdc.models.inceptionv3 import InceptionV3MixMatchModule from frdc.train.frdc_datamodule import FRDCDataModule from model_tests.utils import ( train_preprocess, train_unl_preprocess, preprocess, - evaluate, FRDCDatasetFlipped, ) @@ -43,15 +45,13 @@ def main( run = wandb.init() logger = WandbLogger(name="chestnut_dec_may", project="frdc") # Prepare the dataset - train_lab_ds = 
FRDCDatasetPreset.chestnut_20201218( - transform=train_preprocess - ) + train_lab_ds = ds.chestnut_20201218(transform=train_preprocess) - train_unl_ds = FRDCDatasetPreset.chestnut_20201218.unlabelled( - transform=train_unl_preprocess(2), + train_unl_ds = ds.chestnut_20201218.unlabelled( + transform=train_unl_preprocess(2) ) - val_ds = FRDCDatasetPreset.chestnut_20210510_43m(transform=preprocess) + val_ds = ds.chestnut_20210510_43m(transform=preprocess) oe = OrdinalEncoder( handle_unknown="use_encoded_value", @@ -106,15 +106,20 @@ def main( f"- Results: [WandB Report]({run.get_url()})" ) - fig, acc = evaluate( + y_true, y_pred = predict( ds=FRDCDatasetFlipped( "chestnut_nature_park", "20210510", "90deg43m85pct255deg", transform=preprocess, ), + model_cls=InceptionV3MixMatchModule, ckpt_pth=Path(ckpt.best_model_path), ) + fig, ax = plot_confusion_matrix(y_true, y_pred, oe.categories_[0]) + acc = np.sum(y_true == y_pred) / len(y_true) + ax.set_title(f"Accuracy: {acc:.2%}") + wandb.log({"confusion_matrix": wandb.Image(fig)}) wandb.log({"eval_accuracy": acc}) From a78446a4b849170a3f5d3268d411114c62083352 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 18:47:06 +0800 Subject: [PATCH 14/18] Fix error in documentation signature --- src/frdc/train/frdc_datamodule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frdc/train/frdc_datamodule.py b/src/frdc/train/frdc_datamodule.py index 97daaa6b..9f35ce03 100644 --- a/src/frdc/train/frdc_datamodule.py +++ b/src/frdc/train/frdc_datamodule.py @@ -36,7 +36,7 @@ class FRDCDataModule(LightningDataModule): Does not have the same performance as:: - FRDCSSLDataModule( + FRDCDataModule( train_lab_ds=train_lab_ds, train_unl_ds=None, ... From a0583d6f7fc8f7f907fc67afee3476860e44b7a1 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 28 Dec 2023 18:47:24 +0800 Subject: [PATCH 15/18] Make wandb online by default --- tests/model_tests/chestnut_dec_may/train.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/model_tests/chestnut_dec_may/train.py b/tests/model_tests/chestnut_dec_may/train.py index 97123f02..df2d6814 100644 --- a/tests/model_tests/chestnut_dec_may/train.py +++ b/tests/model_tests/chestnut_dec_may/train.py @@ -5,11 +5,9 @@ """ # Uncomment this to run the W&B monitoring locally -import os - -from frdc.utils.training import predict, plot_confusion_matrix - -os.environ["WANDB_MODE"] = "offline" +# import os +# from frdc.utils.training import predict, plot_confusion_matrix +# os.environ["WANDB_MODE"] = "offline" from pathlib import Path From a64e59a4aaf151143464a672fbbcb37a4623b700 Mon Sep 17 00:00:00 2001 From: Evening Date: Fri, 29 Dec 2023 10:43:07 +0800 Subject: [PATCH 16/18] Migrate Preset classes to preset.py Also forces imports to use load.dataset for FRDCDataset to avoid confusion and circular imports --- Writerside/topics/load.dataset.md | 2 +- .../topics/preprocessing.extract_segments.md | 4 +- Writerside/topics/preprocessing.morphology.md | 2 +- Writerside/topics/preprocessing.scale.md | 2 +- src/frdc/load/__init__.py | 3 - src/frdc/load/dataset.py | 148 ---------------- src/frdc/load/label_studio.py | 10 -- src/frdc/load/preset.py | 159 ++++++++++++++++++ src/frdc/train/frdc_datamodule.py | 5 +- src/frdc/utils/training.py | 2 +- tests/conftest.py | 4 +- tests/model_tests/chestnut_dec_may/train.py | 2 +- tests/model_tests/utils.py | 2 +- 13 files changed, 170 insertions(+), 175 deletions(-) create mode 100644 src/frdc/load/preset.py diff --git a/Writerside/topics/load.dataset.md 
b/Writerside/topics/load.dataset.md index e5a57699..0c5dbf24 100644 --- a/Writerside/topics/load.dataset.md +++ b/Writerside/topics/load.dataset.md @@ -17,7 +17,7 @@ version. For example, to load our Chestnut Nature Park dataset. ```python -from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCDataset ds = FRDCDataset(site='chestnut_nature_park', date='20201218', diff --git a/Writerside/topics/preprocessing.extract_segments.md b/Writerside/topics/preprocessing.extract_segments.md index ed257e8d..6f422569 100644 --- a/Writerside/topics/preprocessing.extract_segments.md +++ b/Writerside/topics/preprocessing.extract_segments.md @@ -135,7 +135,7 @@ Extract segments from bounds and labels. ```python import numpy as np -from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCDataset from frdc.preprocess.extract_segments import extract_segments_from_bounds ds = FRDCDataset(site='chestnut_nature_park', @@ -155,7 +155,7 @@ Extract segments from a label classification. from skimage.morphology import remove_small_objects, remove_small_holes import numpy as np -from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCDataset from frdc.preprocess.morphology import ( threshold_binary_mask, binary_watershed ) diff --git a/Writerside/topics/preprocessing.morphology.md b/Writerside/topics/preprocessing.morphology.md index 50b5d7b2..062eb464 100644 --- a/Writerside/topics/preprocessing.morphology.md +++ b/Writerside/topics/preprocessing.morphology.md @@ -29,7 +29,7 @@ classification Perform auto-segmentation on a dataset to yield a label classification. ```python -from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCDataset from frdc.preprocess.morphology import ( threshold_binary_mask, binary_watershed ) diff --git a/Writerside/topics/preprocessing.scale.md b/Writerside/topics/preprocessing.scale.md index 2ce224be..513e9d5e 100644 --- a/Writerside/topics/preprocessing.scale.md +++ b/Writerside/topics/preprocessing.scale.md @@ -35,7 +35,7 @@ Take a look at frdc.conf.BAND_MAX_CONFIG for an example. ## Usage ```python -from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCDataset from frdc.preprocess.scale import ( scale_0_1_per_band, scale_normal_per_band, scale_static_per_band ) diff --git a/src/frdc/load/__init__.py b/src/frdc/load/__init__.py index 06860a70..e69de29b 100644 --- a/src/frdc/load/__init__.py +++ b/src/frdc/load/__init__.py @@ -1,3 +0,0 @@ -from .dataset import FRDCDataset - -__all__ = ["FRDCDataset"] diff --git a/src/frdc/load/dataset.py b/src/frdc/load/dataset.py index 258aac9c..466aad93 100644 --- a/src/frdc/load/dataset.py +++ b/src/frdc/load/dataset.py @@ -8,15 +8,8 @@ import numpy as np import pandas as pd -import torch from PIL import Image from torch.utils.data import Dataset, ConcatDataset -from torchvision.transforms.v2 import ( - Compose, - ToImage, - ToDtype, - Resize, -) from frdc.conf import ( BAND_CONFIG, @@ -282,85 +275,6 @@ def __add__(self, other) -> FRDCConcatDataset: return FRDCConcatDataset([self, other]) -# This curries the FRDCDataset class, so that we can shorthand the preset -# definitions. -@dataclass -class FRDCDatasetPartial: - """Partial class for FRDCDataset. 
- - Notes: - This is used internally by FRDCDatasetPreset to define the presets - in a more concise manner:: - - # Instead of - lambda *args, **kwargs: - FRDCDataset("chestnut_nature_park", "20201218", None, - *args, **kwargs) - - # Using partial, we can do this instead - FRDCDatasetPartial("chestnut_nature_park", "20201218", None)( - *args, **kwargs - ) - - See FRDCDatasetPreset for usage. - """ - - site: str - date: str - version: str | None - - def __call__( - self, - transform: Callable[[list[np.ndarray]], Any] = None, - target_transform: Callable[[list[str]], list[str]] = None, - use_legacy_bounds: bool = False, - ): - """Alias for labelled().""" - return self.labelled( - transform, - target_transform, - use_legacy_bounds, - ) - - def labelled( - self, - transform: Callable[[list[np.ndarray]], Any] = None, - target_transform: Callable[[list[str]], list[str]] = None, - use_legacy_bounds: bool = False, - ): - """Returns the Labelled Dataset.""" - return FRDCDataset( - self.site, - self.date, - self.version, - transform, - target_transform, - use_legacy_bounds, - ) - - def unlabelled( - self, - transform: Callable[[list[np.ndarray]], Any] = None, - target_transform: Callable[[list[str]], list[str]] = None, - use_legacy_bounds: bool = False, - ): - """Returns the Unlabelled Dataset. - - Notes: - This simply masks away the labels during __getitem__. - The same behaviour can be achieved by setting __class__ to - FRDCUnlabelledDataset, but this is a more convenient way to do so. - """ - return FRDCUnlabelledDataset( - self.site, - self.date, - self.version, - transform, - target_transform, - use_legacy_bounds, - ) - - class FRDCUnlabelledDataset(FRDCDataset): """An implementation of FRDCDataset that masks away the labels. @@ -383,65 +297,3 @@ def __getitem__(self, item): if self.transform else self.ar_segments[item] ) - - -@dataclass -class FRDCDatasetPreset: - """Presets for the FRDCDataset. - - Examples: - Each variable is a preset for the FRDCDataset. - - You can use it like this:: - - FRDCDatasetPreset.chestnut_20201218() - - Which returns a FRDCDataset. - - Furthermore, if you're interested in the unlabelled dataset, you can - use:: - - FRDCDatasetPreset.chestnut_20201218.unlabelled() - - Which returns a FRDCUnlabelledDataset. - - If you'd like to keep the syntax consistent for labelled and unlabelled - datasets, you can use:: - - FRDCDatasetPreset.chestnut_20201218.labelled() - FRDCDatasetPreset.chestnut_20201218.unlabelled() - - The `labelled` method is simply an alias for the `__call__` method. - - The DEBUG dataset is a special dataset that is used for debugging, - which pulls from GCS a small cropped image and dummy label + bounds. 
- - """ - - chestnut_20201218 = FRDCDatasetPartial( - "chestnut_nature_park", "20201218", None - ) - chestnut_20210510_43m = FRDCDatasetPartial( - "chestnut_nature_park", "20210510", "90deg43m85pct255deg" - ) - chestnut_20210510_60m = FRDCDatasetPartial( - "chestnut_nature_park", "20210510", "90deg60m84.5pct255deg" - ) - casuarina_20220418_183deg = FRDCDatasetPartial( - "casuarina_nature_park", "20220418", "183deg" - ) - casuarina_20220418_93deg = FRDCDatasetPartial( - "casuarina_nature_park", "20220418", "93deg" - ) - DEBUG = lambda resize=299: FRDCDatasetPartial( - site="DEBUG", date="0", version=None - )( - transform=Compose( - [ - ToImage(), - ToDtype(torch.float32), - Resize((resize, resize)), - ] - ), - target_transform=None, - ) diff --git a/src/frdc/load/label_studio.py b/src/frdc/load/label_studio.py index b8287ff3..6383cfe4 100644 --- a/src/frdc/load/label_studio.py +++ b/src/frdc/load/label_studio.py @@ -8,16 +8,6 @@ from frdc.conf import LABEL_STUDIO_CLIENT -# try: -# client.check_connection() -# except ConnectionError: -# raise ConnectionError( -# f"Could not connect to Label Studio at {LABEL_STUDIO_URL}. " -# "This uses Label Studio's check_connection() method," -# "which performs retries. " -# "Use utils.is_label_studio_up() as a faster alternative to check if " -# "Label Studio is up." -# ) logger = logging.getLogger(__name__) diff --git a/src/frdc/load/preset.py b/src/frdc/load/preset.py new file mode 100644 index 00000000..4f892349 --- /dev/null +++ b/src/frdc/load/preset.py @@ -0,0 +1,159 @@ +from __future__ import annotations + +import logging +from dataclasses import dataclass +from typing import Callable, Any + +import numpy as np +import torch +from torchvision.transforms.v2 import ( + Compose, + ToImage, + ToDtype, + Resize, +) + +from frdc.load.dataset import FRDCDataset, FRDCUnlabelledDataset + +logger = logging.getLogger(__name__) + + +# This curries the FRDCDataset class, so that we can shorthand the preset +# definitions. +@dataclass +class FRDCDatasetPartial: + """Partial class for FRDCDataset. + + Notes: + This is used internally by FRDCDatasetPreset to define the presets + in a more concise manner:: + + # Instead of + lambda *args, **kwargs: + FRDCDataset("chestnut_nature_park", "20201218", None, + *args, **kwargs) + + # Using partial, we can do this instead + FRDCDatasetPartial("chestnut_nature_park", "20201218", None)( + *args, **kwargs + ) + + See FRDCDatasetPreset for usage. + """ + + site: str + date: str + version: str | None + + def __call__( + self, + transform: Callable[[list[np.ndarray]], Any] = None, + target_transform: Callable[[list[str]], list[str]] = None, + use_legacy_bounds: bool = False, + ): + """Alias for labelled().""" + return self.labelled( + transform, + target_transform, + use_legacy_bounds, + ) + + def labelled( + self, + transform: Callable[[list[np.ndarray]], Any] = None, + target_transform: Callable[[list[str]], list[str]] = None, + use_legacy_bounds: bool = False, + ): + """Returns the Labelled Dataset.""" + return FRDCDataset( + self.site, + self.date, + self.version, + transform, + target_transform, + use_legacy_bounds, + ) + + def unlabelled( + self, + transform: Callable[[list[np.ndarray]], Any] = None, + target_transform: Callable[[list[str]], list[str]] = None, + use_legacy_bounds: bool = False, + ): + """Returns the Unlabelled Dataset. + + Notes: + This simply masks away the labels during __getitem__. 
+ The same behaviour can be achieved by setting __class__ to + FRDCUnlabelledDataset, but this is a more convenient way to do so. + """ + return FRDCUnlabelledDataset( + self.site, + self.date, + self.version, + transform, + target_transform, + use_legacy_bounds, + ) + + +@dataclass +class FRDCDatasetPreset: + """Presets for the FRDCDataset. + + Examples: + Each variable is a preset for the FRDCDataset. + + You can use it like this:: + + FRDCDatasetPreset.chestnut_20201218() + + Which returns a FRDCDataset. + + Furthermore, if you're interested in the unlabelled dataset, you can + use:: + + FRDCDatasetPreset.chestnut_20201218.unlabelled() + + Which returns a FRDCUnlabelledDataset. + + If you'd like to keep the syntax consistent for labelled and unlabelled + datasets, you can use:: + + FRDCDatasetPreset.chestnut_20201218.labelled() + FRDCDatasetPreset.chestnut_20201218.unlabelled() + + The `labelled` method is simply an alias for the `__call__` method. + + The DEBUG dataset is a special dataset that is used for debugging, + which pulls from GCS a small cropped image and dummy label + bounds. + + """ + + chestnut_20201218 = FRDCDatasetPartial( + "chestnut_nature_park", "20201218", None + ) + chestnut_20210510_43m = FRDCDatasetPartial( + "chestnut_nature_park", "20210510", "90deg43m85pct255deg" + ) + chestnut_20210510_60m = FRDCDatasetPartial( + "chestnut_nature_park", "20210510", "90deg60m84.5pct255deg" + ) + casuarina_20220418_183deg = FRDCDatasetPartial( + "casuarina_nature_park", "20220418", "183deg" + ) + casuarina_20220418_93deg = FRDCDatasetPartial( + "casuarina_nature_park", "20220418", "93deg" + ) + DEBUG = lambda resize=299: FRDCDatasetPartial( + site="DEBUG", date="0", version=None + )( + transform=Compose( + [ + ToImage(), + ToDtype(torch.float32), + Resize((resize, resize)), + ] + ), + target_transform=None, + ) diff --git a/src/frdc/train/frdc_datamodule.py b/src/frdc/train/frdc_datamodule.py index 9f35ce03..cabcb604 100644 --- a/src/frdc/train/frdc_datamodule.py +++ b/src/frdc/train/frdc_datamodule.py @@ -1,14 +1,11 @@ from __future__ import annotations from dataclasses import dataclass -from types import MethodType -from typing import Any from lightning import LightningDataModule from torch.utils.data import DataLoader, RandomSampler -from frdc.load import FRDCDataset -from frdc.load.dataset import FRDCUnlabelledDataset +from frdc.load.dataset import FRDCDataset, FRDCUnlabelledDataset @dataclass diff --git a/src/frdc/utils/training.py b/src/frdc/utils/training.py index 87593f89..d8130b3a 100644 --- a/src/frdc/utils/training.py +++ b/src/frdc/utils/training.py @@ -8,7 +8,7 @@ from sklearn.metrics import confusion_matrix from torch.utils.data import DataLoader -from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCDataset def get_latest_ckpt_path(search_dir: Path, extention: str = "ckpt"): diff --git a/tests/conftest.py b/tests/conftest.py index d420f691..b7bf6357 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from frdc.load import FRDCDataset -from frdc.load.dataset import FRDCDatasetPreset +from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset @pytest.fixture(scope="session") diff --git a/tests/model_tests/chestnut_dec_may/train.py b/tests/model_tests/chestnut_dec_may/train.py index df2d6814..70f3dada 100644 --- a/tests/model_tests/chestnut_dec_may/train.py +++ b/tests/model_tests/chestnut_dec_may/train.py @@ -22,7 +22,7 @@ from lightning.pytorch.loggers import 
WandbLogger from sklearn.preprocessing import StandardScaler, OrdinalEncoder -from frdc.load.dataset import FRDCDatasetPreset as ds +from frdc.load.preset import FRDCDatasetPreset as ds from frdc.models.inceptionv3 import InceptionV3MixMatchModule from frdc.train.frdc_datamodule import FRDCDataModule from model_tests.utils import ( diff --git a/tests/model_tests/utils.py b/tests/model_tests/utils.py index f578a87e..bc820f53 100644 --- a/tests/model_tests/utils.py +++ b/tests/model_tests/utils.py @@ -14,7 +14,7 @@ ) from torchvision.transforms.v2 import RandomHorizontalFlip -from frdc.load import FRDCDataset +from frdc.load.dataset import FRDCDataset from frdc.models.inceptionv3 import InceptionV3MixMatchModule THIS_DIR = Path(__file__).parent From 314774c4b2c809292aaa323c1246ae674f93eb1b Mon Sep 17 00:00:00 2001 From: Evening Date: Fri, 29 Dec 2023 11:50:37 +0800 Subject: [PATCH 17/18] Update docs to prefer preset --- Writerside/topics/Retrieve-our-Datasets.md | 15 ++++++++------- Writerside/topics/load.dataset.md | 6 ++---- .../topics/preprocessing.extract_segments.md | 12 ++++-------- Writerside/topics/preprocessing.morphology.md | 6 ++---- Writerside/topics/preprocessing.scale.md | 6 ++---- 5 files changed, 18 insertions(+), 27 deletions(-) diff --git a/Writerside/topics/Retrieve-our-Datasets.md b/Writerside/topics/Retrieve-our-Datasets.md index 46141afe..9c671cbd 100644 --- a/Writerside/topics/Retrieve-our-Datasets.md +++ b/Writerside/topics/Retrieve-our-Datasets.md @@ -25,16 +25,17 @@ Here, we'll download and load our - `labels`: The labels of the trees (segments) ```python -from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset -ds = FRDCDataset(site="chestnut_nature_park", date="20201218", version=None) +ds = FRDCDatasetPreset.chestnut_20201218() ar, order = ds.get_ar_bands() bounds, labels = ds.get_bounds_and_labels() ``` ### What Datasets are there? {collapsible="true"} -> To know what datasets are available, you can run +> We recommend to use FRDCDatasetPreset. However, if you want +> to know what other datasets are available, you can run > [load.gcs](load.gcs.md)'s `list_gcs_datasets()` > method @@ -86,10 +87,10 @@ To segment the data, use [Extract Segments](preprocessing.extract_segments.md). Here, we'll segment the data by the bounds. ```python -from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset from frdc.preprocess.extract_segments import extract_segments_from_bounds -ds = FRDCDataset(site="chestnut_nature_park", date="20201218", version=None) +ds = FRDCDatasetPreset.chestnut_20201218() ar, order = ds.get_ar_bands() bounds, labels = ds.get_bounds_and_labels() segments = extract_segments_from_bounds(ar, bounds) @@ -109,11 +110,11 @@ We can then use these data to plot out the first tree segment. 
```python import matplotlib.pyplot as plt -from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset from frdc.preprocess.extract_segments import extract_segments_from_bounds from frdc.preprocess.scale import scale_0_1_per_band -ds = FRDCDataset(site="chestnut_nature_park", date="20201218", version=None) +ds = FRDCDatasetPreset.chestnut_20201218() ar, order = ds.get_ar_bands() bounds, labels = ds.get_bounds_and_labels() segments = extract_segments_from_bounds(ar, bounds) diff --git a/Writerside/topics/load.dataset.md b/Writerside/topics/load.dataset.md index 0c5dbf24..7cbf6cbc 100644 --- a/Writerside/topics/load.dataset.md +++ b/Writerside/topics/load.dataset.md @@ -17,11 +17,9 @@ version. For example, to load our Chestnut Nature Park dataset. ```python -from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset -ds = FRDCDataset(site='chestnut_nature_park', - date='20201218', - version=None) +ds = FRDCDatasetPreset.chestnut_20201218() ``` Then, we can use the `ds` object to load objects of the dataset: diff --git a/Writerside/topics/preprocessing.extract_segments.md b/Writerside/topics/preprocessing.extract_segments.md index 6f422569..a83b3060 100644 --- a/Writerside/topics/preprocessing.extract_segments.md +++ b/Writerside/topics/preprocessing.extract_segments.md @@ -135,12 +135,10 @@ Extract segments from bounds and labels. ```python import numpy as np -from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset from frdc.preprocess.extract_segments import extract_segments_from_bounds -ds = FRDCDataset(site='chestnut_nature_park', - date='20201218', - version=None, ) +ds = FRDCDatasetPreset.chestnut_20201218() ar, order = ds.get_ar_bands() bounds, labels = ds.get_bounds_and_labels() @@ -155,7 +153,7 @@ Extract segments from a label classification. from skimage.morphology import remove_small_objects, remove_small_holes import numpy as np -from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset from frdc.preprocess.morphology import ( threshold_binary_mask, binary_watershed ) @@ -164,9 +162,7 @@ from frdc.preprocess.extract_segments import ( extract_segments_from_labels, remove_small_segments_from_labels ) -ds = FRDCDataset(site='chestnut_nature_park', - date='20201218', - version=None, ) +ds = FRDCDatasetPreset.chestnut_20201218() ar, order = ds.get_ar_bands() ar = scale_0_1_per_band(ar) ar_mask = threshold_binary_mask(ar, -1, 90 / 256) diff --git a/Writerside/topics/preprocessing.morphology.md b/Writerside/topics/preprocessing.morphology.md index 062eb464..95289404 100644 --- a/Writerside/topics/preprocessing.morphology.md +++ b/Writerside/topics/preprocessing.morphology.md @@ -29,14 +29,12 @@ classification Perform auto-segmentation on a dataset to yield a label classification. 
```python -from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset from frdc.preprocess.morphology import ( threshold_binary_mask, binary_watershed ) -ds = FRDCDataset(site='chestnut_nature_park', - date='20201218', - version=None, ) +ds = FRDCDatasetPreset.chestnut_20201218() ar, order = ds.get_ar_bands() mask = threshold_binary_mask(ar, order.index('NIR'), 90 / 256) ar_label = binary_watershed(mask) diff --git a/Writerside/topics/preprocessing.scale.md b/Writerside/topics/preprocessing.scale.md index 513e9d5e..0b0e5946 100644 --- a/Writerside/topics/preprocessing.scale.md +++ b/Writerside/topics/preprocessing.scale.md @@ -35,15 +35,13 @@ Take a look at frdc.conf.BAND_MAX_CONFIG for an example. ## Usage ```python -from frdc.load.dataset import FRDCDataset +from frdc.load.preset import FRDCDatasetPreset from frdc.preprocess.scale import ( scale_0_1_per_band, scale_normal_per_band, scale_static_per_band ) from frdc.conf import BAND_MAX_CONFIG -ds = FRDCDataset(site='chestnut_nature_park', - date='20201218', - version=None, ) +ds = FRDCDatasetPreset.chestnut_20201218() ar, order = ds.get_ar_bands() ar_01 = scale_0_1_per_band(ar) ar_norm = scale_normal_per_band(ar) From 1673227701272a2e210578927a8202dc2e352352 Mon Sep 17 00:00:00 2001 From: Evening Date: Fri, 29 Dec 2023 11:53:47 +0800 Subject: [PATCH 18/18] update html docs --- docs/HelpTOC.json | 2 +- docs/custom-k-aug-dataloaders.html | 6 ++--- docs/get-started-with-dev-containers.html | 4 ++-- docs/getting-started.html | 20 ++++++++-------- docs/icon-192.png | Bin 337 -> 0 bytes docs/icon-512.png | Bin 1103 -> 0 bytes docs/load-dataset.html | 14 +++++------ docs/load-gcs.html | 6 ++--- docs/mix-match-module.html | 12 +++++----- docs/mix-match.html | 2 +- docs/model-test-chestnut-may-dec.html | 2 +- docs/overview.html | 2 +- docs/preprocessing-extract-segments.html | 28 ++++++++++------------ docs/preprocessing-glcm-padded.html | 4 ++-- docs/preprocessing-morphology.html | 12 ++++------ docs/preprocessing-scale.html | 10 ++++---- docs/retrieve-our-datasets.html | 22 ++++++++--------- docs/site.webmanifest | 11 --------- docs/train-frdc-lightning.html | 4 ++-- 19 files changed, 70 insertions(+), 91 deletions(-) delete mode 100644 docs/icon-192.png delete mode 100644 docs/icon-512.png delete mode 100644 docs/site.webmanifest diff --git a/docs/HelpTOC.json b/docs/HelpTOC.json index d0eb4e12..5467dd78 100644 --- a/docs/HelpTOC.json +++ b/docs/HelpTOC.json @@ -1 +1 @@ -{"entities":{"pages":{"Overview":{"id":"Overview","title":"Overview","url":"overview.html","level":0,"tabIndex":0},"Getting-Started":{"id":"Getting-Started","title":"Getting Started","url":"getting-started.html","level":0,"pages":["Get-Started-with-Dev-Containers"],"tabIndex":1},"Get-Started-with-Dev-Containers":{"id":"Get-Started-with-Dev-Containers","title":"Get Started with Dev Containers","url":"get-started-with-dev-containers.html","level":1,"parentId":"Getting-Started","tabIndex":0},"e8e19623_66291":{"id":"e8e19623_66291","title":"Tutorials","level":0,"pages":["Retrieve-our-Datasets"],"tabIndex":2},"Retrieve-our-Datasets":{"id":"Retrieve-our-Datasets","title":"Retrieve our Datasets","url":"retrieve-our-datasets.html","level":1,"parentId":"e8e19623_66291","tabIndex":0},"mix-match":{"id":"mix-match","title":"MixMatch","url":"mix-match.html","level":0,"pages":["mix-match-module","custom-k-aug-dataloaders"],"tabIndex":3},"mix-match-module":{"id":"mix-match-module","title":"MixMatch 
Module","url":"mix-match-module.html","level":1,"parentId":"mix-match","tabIndex":0},"custom-k-aug-dataloaders":{"id":"custom-k-aug-dataloaders","title":"Custom K-Aug Dataloaders","url":"custom-k-aug-dataloaders.html","level":1,"parentId":"mix-match","tabIndex":1},"e8e19623_66296":{"id":"e8e19623_66296","title":"Model Tests","level":0,"pages":["Model-Test-Chestnut-May-Dec"],"tabIndex":4},"Model-Test-Chestnut-May-Dec":{"id":"Model-Test-Chestnut-May-Dec","title":"Model Test Chestnut May-Dec","url":"model-test-chestnut-may-dec.html","level":1,"parentId":"e8e19623_66296","tabIndex":0},"e8e19623_66298":{"id":"e8e19623_66298","title":"API","level":0,"pages":["load.dataset","load.gcs","preprocessing.scale","preprocessing.extract_segments","preprocessing.morphology","preprocessing.glcm_padded","train.frdc_lightning"],"tabIndex":5},"load.dataset":{"id":"load.dataset","title":"load.dataset","url":"load-dataset.html","level":1,"parentId":"e8e19623_66298","tabIndex":0},"load.gcs":{"id":"load.gcs","title":"load.gcs","url":"load-gcs.html","level":1,"parentId":"e8e19623_66298","tabIndex":1},"preprocessing.scale":{"id":"preprocessing.scale","title":"preprocessing.scale","url":"preprocessing-scale.html","level":1,"parentId":"e8e19623_66298","tabIndex":2},"preprocessing.extract_segments":{"id":"preprocessing.extract_segments","title":"preprocessing.extract_segments","url":"preprocessing-extract-segments.html","level":1,"parentId":"e8e19623_66298","tabIndex":3},"preprocessing.morphology":{"id":"preprocessing.morphology","title":"preprocessing.morphology","url":"preprocessing-morphology.html","level":1,"parentId":"e8e19623_66298","tabIndex":4},"preprocessing.glcm_padded":{"id":"preprocessing.glcm_padded","title":"preprocessing.glcm_padded","url":"preprocessing-glcm-padded.html","level":1,"parentId":"e8e19623_66298","tabIndex":5},"train.frdc_lightning":{"id":"train.frdc_lightning","title":"train.frdc_datamodule \u0026 frdc_module","url":"train-frdc-lightning.html","level":1,"parentId":"e8e19623_66298","tabIndex":6}}},"topLevelIds":["Overview","Getting-Started","e8e19623_66291","mix-match","e8e19623_66296","e8e19623_66298"]} \ No newline at end of file +{"entities":{"pages":{"Overview":{"id":"Overview","title":"Overview","url":"overview.html","level":0,"tabIndex":0},"Getting-Started":{"id":"Getting-Started","title":"Getting Started","url":"getting-started.html","level":0,"pages":["Get-Started-with-Dev-Containers"],"tabIndex":1},"Get-Started-with-Dev-Containers":{"id":"Get-Started-with-Dev-Containers","title":"Get Started with Dev Containers","url":"get-started-with-dev-containers.html","level":1,"parentId":"Getting-Started","tabIndex":0},"f6c570e4_4234":{"id":"f6c570e4_4234","title":"Tutorials","level":0,"pages":["Retrieve-our-Datasets"],"tabIndex":2},"Retrieve-our-Datasets":{"id":"Retrieve-our-Datasets","title":"Retrieve our Datasets","url":"retrieve-our-datasets.html","level":1,"parentId":"f6c570e4_4234","tabIndex":0},"mix-match":{"id":"mix-match","title":"MixMatch","url":"mix-match.html","level":0,"pages":["mix-match-module","custom-k-aug-dataloaders"],"tabIndex":3},"mix-match-module":{"id":"mix-match-module","title":"MixMatch Module","url":"mix-match-module.html","level":1,"parentId":"mix-match","tabIndex":0},"custom-k-aug-dataloaders":{"id":"custom-k-aug-dataloaders","title":"Custom K-Aug Dataloaders","url":"custom-k-aug-dataloaders.html","level":1,"parentId":"mix-match","tabIndex":1},"f6c570e4_4239":{"id":"f6c570e4_4239","title":"Model 
Tests","level":0,"pages":["Model-Test-Chestnut-May-Dec"],"tabIndex":4},"Model-Test-Chestnut-May-Dec":{"id":"Model-Test-Chestnut-May-Dec","title":"Model Test Chestnut May-Dec","url":"model-test-chestnut-may-dec.html","level":1,"parentId":"f6c570e4_4239","tabIndex":0},"f6c570e4_4241":{"id":"f6c570e4_4241","title":"API","level":0,"pages":["load.dataset","load.gcs","preprocessing.scale","preprocessing.extract_segments","preprocessing.morphology","preprocessing.glcm_padded","train.frdc_lightning"],"tabIndex":5},"load.dataset":{"id":"load.dataset","title":"load.dataset","url":"load-dataset.html","level":1,"parentId":"f6c570e4_4241","tabIndex":0},"load.gcs":{"id":"load.gcs","title":"load.gcs","url":"load-gcs.html","level":1,"parentId":"f6c570e4_4241","tabIndex":1},"preprocessing.scale":{"id":"preprocessing.scale","title":"preprocessing.scale","url":"preprocessing-scale.html","level":1,"parentId":"f6c570e4_4241","tabIndex":2},"preprocessing.extract_segments":{"id":"preprocessing.extract_segments","title":"preprocessing.extract_segments","url":"preprocessing-extract-segments.html","level":1,"parentId":"f6c570e4_4241","tabIndex":3},"preprocessing.morphology":{"id":"preprocessing.morphology","title":"preprocessing.morphology","url":"preprocessing-morphology.html","level":1,"parentId":"f6c570e4_4241","tabIndex":4},"preprocessing.glcm_padded":{"id":"preprocessing.glcm_padded","title":"preprocessing.glcm_padded","url":"preprocessing-glcm-padded.html","level":1,"parentId":"f6c570e4_4241","tabIndex":5},"train.frdc_lightning":{"id":"train.frdc_lightning","title":"train.frdc_datamodule \u0026 frdc_module","url":"train-frdc-lightning.html","level":1,"parentId":"f6c570e4_4241","tabIndex":6}}},"topLevelIds":["Overview","Getting-Started","f6c570e4_4234","mix-match","f6c570e4_4239","f6c570e4_4241"]} \ No newline at end of file diff --git a/docs/custom-k-aug-dataloaders.html b/docs/custom-k-aug-dataloaders.html index 4863aad8..473d540d 100644 --- a/docs/custom-k-aug-dataloaders.html +++ b/docs/custom-k-aug-dataloaders.html @@ -1,4 +1,4 @@ - Custom K-Aug Dataloaders | Documentation

Documentation 0.0.8 Help

Custom K-Aug Dataloaders

In MixMatch, implementing the data loading methods is quite unconventional.

  1. We need to load multiple augmented versions of the same image into the same batch.

  2. The labelled set is usually too small, causing a premature end to the epoch as it runs out of samples to draw from faster than the unlabelled set.

This can be rather tricky to implement in PyTorch. This tutorial will illustrate how we did it.

Loading Multiple Augmented Versions of the Same Image

See: frdc/load/dataset.py FRDCDataset.__getitem__

In MixMatch, a single train batch must consist of:

  1. A batch of labeled images

  2. K batches of unlabeled images

Aug
Aug
Aug
Aug
Get Batch
Aug Labelled Batch
Unlabelled Batch
Aug Unl. Batch 1
Aug Unl. Batch i
Aug Unl. Batch K

Keep in mind that the unlabelled batch is a single batch of images, not separate draws of batches. It is then "duplicated" K times, and each copy is augmented differently.

Solution 1: Custom Dataset

To solve this, we need to understand the role of both a Dataset and a DataLoader.

  • A Dataset represents a collection of data, responsible for loading and returning something.

  • A DataLoader draws samples from a Dataset and returns batched samples.

The key here is that a Dataset is not limited to returning 1 sample at a time, we can make it return the K augmented versions of the same image.

Aug
Aug
Aug
Sample
Aug Sample 1
Aug Sample i
Aug Sample K

In code, this is done by subclassing the Dataset class and overriding the __getitem__ method.

+ Custom K-Aug Dataloaders | Documentation

Documentation 0.0.8 Help

Custom K-Aug Dataloaders

In MixMatch, implementing the data loading methods is quite unconventional.

  1. We need to load multiple augmented versions of the same image into the same batch.

  2. The labelled set is usually too small, causing a premature end to the epoch as it runs out of samples to draw from faster than the unlabelled set.

This can be rather tricky to implement in PyTorch. This tutorial will illustrate how we did it.

Loading Multiple Augmented Versions of the Same Image

See: frdc/load/dataset.py FRDCDataset.__getitem__

In MixMatch, a single train batch must consist of:

  1. A batch of labeled images

  2. K batches of unlabeled images

Aug
Aug
Aug
Aug
Get Batch
Aug Labelled Batch
Unlabelled Batch
Aug Unl. Batch 1
Aug Unl. Batch i
Aug Unl. Batch K

Keep in mind that the unlabelled batch is a single batch of images, not separate draws of batches. It is then "duplicated" K times, and each copy is augmented differently.

Solution 1: Custom Dataset

To solve this, we need to understand the role of both a Dataset and a DataLoader.

  • A Dataset represents a collection of data, responsible for loading and returning something.

  • A DataLoader draws samples from a Dataset and returns batched samples.

The key here is that a Dataset is not limited to returning 1 sample at a time, we can make it return the K augmented versions of the same image.

Aug
Aug
Aug
Sample
Aug Sample 1
Aug Sample i
Aug Sample K

In code, this is done by subclassing the Dataset class and overriding the __getitem__ method.

def duplicate(x): return x, deepcopy(x), deepcopy(x) @@ -10,7 +10,7 @@ def __getitem__(self, index): x, y = self.dataset[index] return self.aug(x), y -

In the above example, we have a Dataset that returns 3 duplicate versions of the same image. By leveraging this technique, we can create a Dataset that returns K augmented versions of the same image as a tuple.

Premature End of Epoch due to Small Labelled Set

See: frdc/train/frdc_datamodule.py

In MixMatch, the definition of an "epoch" is a bit different. Instead of implying that we have seen all the data once, it implies that we've drawn N batches. The N is referred to as the number of iterations per epoch.

Take for example, a labelled set of numbers [1, 2, 3] and an unlabelled set [4, 5, 6, 7, 8, 9, 10]. With batch size of 2, we'll run out of labelled samples after 2 iterations, but we'll still have 3 more iterations for the unlabelled set.

  • Draw 1: [1, 2], [4, 5]

  • Draw 2: [3], [6, 7].

  • Epoch ends.

Solution 2: Random Sampling

To fix this, instead of sequentially sampling the labelled set (and the unlabelled set), we can sample them randomly. This way, we can ensure that it never runs out.

  • Draw 1: [1, 3], [7, 5]

  • Draw 2: [2, 1], [4, 9]

  • Draw 3: [3, 2], [8, 6]

  • ... and so on.

Luckily, PyTorch's DataLoader supports random sampling. We just need to use RandomSampler instead of SequentialSampler (which is the default).

+

In the above example, we have a Dataset that returns 3 duplicate versions of the same image. By leveraging this technique, we can create a Dataset that returns K augmented versions of the same image as a tuple, as sketched below.
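To make that concrete, here is a minimal sketch of such a wrapper; the class and attribute names are illustrative only and are not the actual FRDCDataset implementation.

from copy import deepcopy
from torch.utils.data import Dataset

class KAugDataset(Dataset):
    """Illustrative wrapper: each sample comes back as K augmented copies."""

    def __init__(self, dataset, aug, k: int):
        self.dataset = dataset  # any map-style dataset returning (x, y)
        self.aug = aug          # augmentation callable, applied independently K times
        self.k = k

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        x, y = self.dataset[idx]
        # K independently augmented views of the same image, as a tuple.
        return tuple(self.aug(deepcopy(x)) for _ in range(self.k)), y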

Premature End of Epoch due to Small Labelled Set

See: frdc/train/frdc_datamodule.py

In MixMatch, the definition of an "epoch" is a bit different. Instead of implying that we have seen all the data once, it implies that we've drawn N batches. The N is referred to as the number of iterations per epoch.

Take for example, a labelled set of numbers [1, 2, 3] and an unlabelled set [4, 5, 6, 7, 8, 9, 10]. With batch size of 2, we'll run out of labelled samples after 2 iterations, but we'll still have 3 more iterations for the unlabelled set.

  • Draw 1: [1, 2], [4, 5]

  • Draw 2: [3], [6, 7].

  • Epoch ends.
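To see the premature cut-off concretely, here is a tiny, purely illustrative script; zip() stops at the shorter loader, so only two draws happen.

from torch.utils.data import DataLoader

lab = [1, 2, 3]
unl = [4, 5, 6, 7, 8, 9, 10]

# Sequential sampling: the labelled loader is exhausted after 2 batches,
# so the zipped loop (the "epoch") ends early.
for lab_batch, unl_batch in zip(
    DataLoader(lab, batch_size=2), DataLoader(unl, batch_size=2)
):
    print(lab_batch, unl_batch)  # prints only 2 draws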

Solution 2: Random Sampling

To fix this, instead of sequentially sampling the labelled set (and the unlabelled set), we can sample them randomly. This way, we can ensure that it never runs out.

  • Draw 1: [1, 3], [7, 5]

  • Draw 2: [2, 1], [4, 9]

  • Draw 3: [3, 2], [8, 6]

  • ... and so on.

Luckily, PyTorch's DataLoader supports random sampling. We just need to use RandomSampler instead of SequentialSampler (which is the default).

from torch.utils.data import DataLoader, RandomSampler dl = DataLoader( @@ -21,4 +21,4 @@ replacement=False, ) ) -

This will ensure that the "epoch" ends when we've drawn train_iters batches.

Last modified: 27 December 2023
\ No newline at end of file +

This will ensure that the "epoch" ends when we've drawn train_iters batches.

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/get-started-with-dev-containers.html b/docs/get-started-with-dev-containers.html index 5d793d5a..be7dc455 100644 --- a/docs/get-started-with-dev-containers.html +++ b/docs/get-started-with-dev-containers.html @@ -1,3 +1,3 @@ - Get Started with Dev Containers | Documentation

Documentation 0.0.8 Help

Get Started with Dev Containers

Dev. Containers are a great way to get started with a project. They define all necessary dependencies and environments, so you can just start coding within the container.

In this article, we'll only go over additional steps to set up with our project. For more information on how to use Dev Containers, please refer to the official documentation for each IDE. Once you've set up the Dev Container, come back here to finish the setup:

Python Environment

The dev environment is already created and is managed by Anaconda /opt/conda/bin/conda. To activate the environment, run the following command:

+ Get Started with Dev Containers | Documentation

Documentation 0.0.8 Help

Get Started with Dev Containers

Dev. Containers are a great way to get started with a project. They define all necessary dependencies and environments, so you can just start coding within the container.

In this article, we'll only go over additional steps to set up with our project. For more information on how to use Dev Containers, please refer to the official documentation for each IDE. Once you've set up the Dev Container, come back here to finish the setup:

Python Environment

The dev environment is already created and is managed by Anaconda /opt/conda/bin/conda. To activate the environment, run the following command:

conda activate base -

Mark as Sources Root (Add to PYTHONPATH)

For import statements to work, you need to mark the src folder as the sources root. Optionally, also mark the tests folder as the tests root.

Additional Setup

Refer to the Getting Started guide for additional setup steps such as:

  • Google Cloud Application Default Credentials

  • Weights & Biases API Key

  • Label Studio API Key

Last modified: 27 December 2023
\ No newline at end of file +

Mark as Sources Root (Add to PYTHONPATH)

For import statements to work, you need to mark the src folder as the sources root. Optionally, also mark the tests folder as the tests root.

Additional Setup

Refer to the Getting Started guide for additional setup steps such as:

  • Google Cloud Application Default Credentials

  • Weights & Biases API Key

  • Label Studio API Key

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/getting-started.html b/docs/getting-started.html index 8f064904..8bc4700d 100644 --- a/docs/getting-started.html +++ b/docs/getting-started.html @@ -1,22 +1,22 @@ - Getting Started | Documentation

Documentation 0.0.8 Help

Getting Started

Installing the Dev. Environment

  1. Ensure that you have the right version of Python. The required Python version can be seen in pyproject.toml

    + Getting Started | Documentation

    Documentation 0.0.8 Help

    Getting Started

    Installing the Dev. Environment

    1. Ensure that you have the right version of Python. The required Python version can be seen in pyproject.toml

      [tool.poetry.dependencies] python = "..." -
    2. Start by cloning our repository.

      +
    3. Start by cloning our repository.

      git clone https://github.com/FR-DC/FRDC-ML.git -
    4. Then, create a Python Virtual Env pyvenv

      python -m venv venv/
      python3 -m venv venv/
    5. Install Poetry Then check if it's installed with

      poetry --version
    6. Activate the virtual environment

      +
    7. Then, create a Python Virtual Env pyvenv

      python -m venv venv/
      python3 -m venv venv/
    8. Install Poetry Then check if it's installed with

      poetry --version
    9. Activate the virtual environment

      cd venv/Scripts activate cd ../.. -
      +
      source venv/bin/activate -
    10. Install the dependencies. You should be in the same directory as pyproject.toml

      +
  2. Install the dependencies. You should be in the same directory as pyproject.toml

    poetry install --with dev -
  3. Install Pre-Commit Hooks

    +
  4. Install Pre-Commit Hooks

    pre-commit install -

Setting Up Google Cloud

  1. We use Google Cloud to store our datasets. To set up Google Cloud, install the Google Cloud CLI

  2. Then, authenticate your account.

    gcloud auth login
  3. Finally, set up Application Default Credentials (ADC).

    gcloud auth application-default login
  4. To make sure everything is working, run the tests.

Setting Up Label Studio

  1. We use Label Studio to annotate our datasets. We won't go through how to install Label Studio; for contributors, it should be up on localhost:8080.

  2. Then, retrieve your own API key from Label Studio. Go to your account page and copy the API key.


  3. Set your API key as an environment variable.

    In Windows, go to "Edit environment variables for your account" and add this as a new environment variable with name LABEL_STUDIO_API_KEY.

    Export it as an environment variable.

    export LABEL_STUDIO_API_KEY=...

Setting Up Weights & Biases

  1. We use W&B to track our experiments. To set up W&B, install the W&B CLI

  2. Then, authenticate your account.

    wandb login

Pre-commit Hooks

  • +

Setting Up Google Cloud

  1. We use Google Cloud to store our datasets. To set up Google Cloud, install the Google Cloud CLI

  2. Then, authenticate your account.

    gcloud auth login
  3. Finally, set up Application Default Credentials (ADC).

    gcloud auth application-default login
  4. To make sure everything is working, run the tests.

Setting Up Label Studio

  1. We use Label Studio to annotate our datasets. We won't go through how to install Label Studio; for contributors, it should be up on localhost:8080.

  2. Then, retrieve your own API key from Label Studio. Go to your account page and copy the API key.


  3. Set your API key as an environment variable.

    In Windows, go to "Edit environment variables for your account" and add this as a new environment variable with name LABEL_STUDIO_API_KEY.

    Export it as an environment variable.

    export LABEL_STUDIO_API_KEY=...

Setting Up Weights & Biases

  1. We use W&B to track our experiments. To set up W&B, install the W&B CLI

  2. Then, authenticate your account.

    wandb login

Pre-commit Hooks

  • pre-commit install -

Running the Tests

  • Run the tests to make sure everything is working

    +

Running the Tests

  • Run the tests to make sure everything is working

    pytest -

Troubleshooting

ModuleNotFoundError

It's likely that your src and tests directories are not in PYTHONPATH. To fix this, run the following command:

+

Troubleshooting

ModuleNotFoundError

It's likely that your src and tests directories are not in PYTHONPATH. To fix this, run the following command:

export PYTHONPATH=$PYTHONPATH:./src:./tests -

Or, set it in your IDE, for example, IntelliJ allows setting directories as Source Roots.

google.auth.exceptions.DefaultCredentialsError

It's likely that you haven't authenticated your Google Cloud account. See Setting Up Google Cloud

Couldn't connect to Label Studio

Label Studio must be running locally, exposed on localhost:8080. Furthermore, you need to specify the LABEL_STUDIO_API_KEY environment variable. See Setting Up Label Studio

Cannot login to W&B

You need to authenticate your W&B account. See Setting Up Weights & Biases. If you're facing difficulties, set the WANDB_MODE environment variable to offline to disable W&B.

Our Repository Structure

Before starting development, take a look at our repository structure. This will help you understand where to put your code.

Core Dependencies
Resources
Tests
Repo Dependencies
Dataset Loaders
Preprocessing Fn.
Train Deps
Model Architectures
Datasets ...
FRDC
src/frdc/
rsc/
tests/
pyproject.toml,poetry.lock
./load/
./preprocess/
./train/
./models/
./dataset_name/
src/frdc/

Source Code for our package. These are the unit components of our pipeline.

rsc/

Resources. These are usually cached datasets

tests/

PyTest tests. These are unit, integration, and model tests.

Unit, Integration, and Pipeline Tests

We have 3 types of tests:

  • Unit Tests are usually small, single function tests.

  • Integration Tests are larger tests that test a mock pipeline.

  • Model Tests are the true production pipeline tests that will generate a model.

Where Should I contribute?

Changing a small component

If you're changing a small component, such as an argument for preprocessing, a new model architecture, or a new configuration for a dataset, take a look at the src/frdc/ directory.

Adding a test

If you add a new component, you'll also need to add a new test. Take a look at the tests/ directory.

Changing the model pipeline

If you're a ML Researcher, you'll probably be changing the pipeline. Take a look at the tests/model_tests/ directory.

Adding a dependency

If you're adding a new dependency, use poetry add PACKAGE and commit the changes to pyproject.toml and poetry.lock.

Last modified: 27 December 2023
\ No newline at end of file +

Or, set it in your IDE, for example, IntelliJ allows setting directories as Source Roots.

google.auth.exceptions.DefaultCredentialsError

It's likely that you haven't authenticated your Google Cloud account. See Setting Up Google Cloud

Couldn't connect to Label Studio

Label Studio must be running locally, exposed on localhost:8080. Furthermore, you need to specify the LABEL_STUDIO_API_KEY environment variable. See Setting Up Label Studio

Cannot login to W&B

You need to authenticate your W&B account. See Setting Up Weights & Biases. If you're facing difficulties, set the WANDB_MODE environment variable to offline to disable W&B.

Our Repository Structure

Before starting development, take a look at our repository structure. This will help you understand where to put your code.

Core Dependencies
Resources
Tests
Repo Dependencies
Dataset Loaders
Preprocessing Fn.
Train Deps
Model Architectures
Datasets ...
FRDC
src/frdc/
rsc/
tests/
pyproject.toml,poetry.lock
./load/
./preprocess/
./train/
./models/
./dataset_name/
src/frdc/

Source Code for our package. These are the unit components of our pipeline.

rsc/

Resources. These are usually cached datasets

tests/

PyTest tests. These are unit, integration, and model tests.

Unit, Integration, and Pipeline Tests

We have 3 types of tests:

  • Unit Tests are usually small, single function tests.

  • Integration Tests are larger tests that test a mock pipeline.

  • Model Tests are the true production pipeline tests that will generate a model.

Where Should I contribute?

Changing a small component

If you're changing a small component, such as an argument for preprocessing, a new model architecture, or a new configuration for a dataset, take a look at the src/frdc/ directory.

Adding a test

If you add a new component, you'll also need to add a new test. Take a look at the tests/ directory.

Changing the model pipeline

If you're a ML Researcher, you'll probably be changing the pipeline. Take a look at the tests/model_tests/ directory.

Adding a dependency

If you're adding a new dependency, use poetry add PACKAGE and commit the changes to pyproject.toml and poetry.lock.

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/icon-192.png b/docs/icon-192.png deleted file mode 100644 index 5953601c396250504ba6b31c031ea906e92b6cd9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 337 zcmeAS@N?(olHy`uVBq!ia0vp^2SAvE2}s`E_d9@rflSGwb;dxs*#b$G<8}erCCjeWuCzLfaEd zD*IkLs+}#4;Wx^h_m~)^%s}g@2u|;lzdBFn<%jhd{?kf+gl|#zw)&%eYqF~BKhd`* zC-MF7`j+C^Uhi7HY02UJi)gTe~DWM4f DYn5)J diff --git a/docs/icon-512.png b/docs/icon-512.png deleted file mode 100644 index 9840e7b0cd4973a67d66ea20a62c77380047aed1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1103 zcmeAS@N?(olHy`uVBq!ia0y~yU;;9k7&t&wwUqN(1_l-}PZ!6KinzB|482$b1XvEf z|I5F&QoY|*z-`s1vsK?dmwsVZ>UO9HIT3v b7H)suWWQGE&SGE*!NB0@>gTe~DWM4f4BN6W diff --git a/docs/load-dataset.html b/docs/load-dataset.html index bd5caa57..8ae04891 100644 --- a/docs/load-dataset.html +++ b/docs/load-dataset.html @@ -1,14 +1,12 @@ - load.dataset | Documentation

Documentation 0.0.8 Help

load.dataset

Usage

Firstly, to load a dataset instance, you need to initialize a FRDCDataset object, providing the site, date, and version.

For example, to load our Chestnut Nature Park dataset:

-from frdc.load import FRDCDataset + load.dataset | Documentation

Documentation 0.0.8 Help

load.dataset

Usage

Firstly, to load a dataset instance, you need to initialize a FRDCDataset object, providing the site, date, and version. The presets in FRDCDatasetPreset construct this for you for our published datasets.

For example, to load our Chestnut Nature Park dataset:

+from frdc.load.preset import FRDCDatasetPreset -ds = FRDCDataset(site='chestnut_nature_park', - date='20201218', - version=None) -

Then, we can use the ds object to load objects of the dataset:

+ds = FRDCDatasetPreset.chestnut_20201218() +

Then, we can use the ds object to load objects of the dataset:

ar, order = ds.get_ar_bands() d = ds.get_ar_bands_as_dict() bounds, labels = ds.get_bounds_and_labels() -
  • ar is a stacked NDArray of the hyperspectral bands of shape (H x W x C)

  • order is a list of strings, containing the names of the bands, ordered according to the channels of ar

  • d is a dictionary of the hyperspectral bands of shape (H x W), keyed by the band names

  • bounds is a list of bounding boxes, in the format of Rect, a namedtuple of x0, y0, x1, y1

  • labels is a list of strings, containing the labels of the bounding boxes, ordered according to bounds

Filters

You can also selectively get the channels for both get_ar_bands() and get_ar_bands_as_dict() by providing a list of strings to the bands argument.

For example, to get the Wideband RGB bands, you can do:

+
  • ar is a stacked NDArray of the hyperspectral bands of shape (H x W x C)

  • order is a list of strings, containing the names of the bands, ordered according to the channels of ar

  • d is a dictionary of the hyperspectral bands of shape (H x W), keyed by the band names

  • bounds is a list of bounding boxes, in the format of Rect, a namedtuple of x0, y0, x1, y1

  • labels is a list of strings, containing the labels of the bounding boxes, ordered according to bounds
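Putting the fields above together, and continuing from the ds created earlier on this page, a small illustrative snippet follows; the slicing is only a sketch, and extract_segments_from_bounds is the proper way to cut out segments.

ar, order = ds.get_ar_bands()
bounds, labels = ds.get_bounds_and_labels()

for (x0, y0, x1, y1), label in zip(bounds, labels):
    crop = ar[y0:y1, x0:x1]  # rows are y, columns are x in the (H x W x C) array
    print(label, crop.shape)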

Filters

You can also selectively get the channels for both get_ar_bands() and get_ar_bands_as_dict() by providing a list of strings to the bands argument.

For example, to get the Wideband RGB bands, you can do:

ar, order = ds.get_ar_bands(bands=['WR', 'WG', 'WB']) d = ds.get_ar_bands_as_dict(bands=['WR', 'WG', 'WB']) -

This will also alter the channel order to the order of the bands provided.

See load.gcs for configuration options.

Last modified: 27 December 2023
\ No newline at end of file +

This will also alter the channel order to the order of the bands provided.

See load.gcs for configuration options.

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/load-gcs.html b/docs/load-gcs.html index c6faadf2..ff221005 100644 --- a/docs/load-gcs.html +++ b/docs/load-gcs.html @@ -1,11 +1,11 @@ - load.gcs | Documentation

Documentation 0.0.8 Help

load.gcs

Usage

These are defined in the top-level load.gcs module.

list_gcs_datasets

Lists all datasets in the bucket as a DataFrame. This works by checking which folders have a specific file, which we call the anchor.

download

Downloads a file from Google Cloud Storage and returns the local file path.

open_file

Downloads and opens a file from Google Cloud Storage. Returns a file handle.

open_image

Downloads and returns the PIL image from Google Cloud Storage.

Pathing

The path to specify is relative to the bucket, which is frdc-ds by default.

For example, this filesystem on GCS:

+ load.gcs | Documentation

Documentation 0.0.8 Help

load.gcs

Usage

These are defined in the top-level load.gcs module.

list_gcs_datasets

Lists all datasets in the bucket as a DataFrame. This works by checking which folders have a specific file, which we call the anchor.

download

Downloads a file from Google Cloud Storage and returns the local file path.

open_file

Downloads and opens a file from Google Cloud Storage. Returns a file handle.

open_image

Downloads and returns the PIL image from Google Cloud Storage.
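A rough usage sketch of these four functions is shown below; the import path and the image file name are assumptions for illustration, while the bounds.json path mirrors the pathing example in the next section.

from frdc.load import gcs  # assumed import path for the load.gcs module

df = gcs.list_gcs_datasets()  # DataFrame of datasets found in the bucket
local_path = gcs.download("chestnut_nature_park/20201218/90deg/bounds.json")
fp = gcs.open_file("chestnut_nature_park/20201218/90deg/bounds.json")  # file handle
im = gcs.open_image("chestnut_nature_park/20201218/90deg/map.tif")  # PIL Image; file name is a placeholder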

Pathing

The path to specify is relative to the bucket, which is frdc-ds by default.

For example, this filesystem on GCS:

# On Google Cloud Storage frdc-ds ├── chestnut_nature_park │ └── 20201218 │ └── 90deg │ └── bounds.json -

To download bounds.json, use download(r"chestnut_nature_park/20201218/90deg/bounds.json"). By default, all files will be downloaded to PROJ_DIR/rsc/....

+

To download bounds.json, use download(r"chestnut_nature_park/20201218/90deg/bounds.json"). By default, all files will be downloaded to PROJ_DIR/rsc/....

# On local filesystem PROJ_DIR ├── rsc @@ -13,4 +13,4 @@ │ └── 20201218 │ └── 90deg │ └── bounds.json -

Configuration

If you need granular control over

  • where the files are downloaded

  • the credentials used

  • the project used

  • the bucket used

Then edit conf.py.

GCS_CREDENTIALS

Google Cloud credentials.


A google.oauth2.service_account.Credentials object. See the object documentation for more information.

LOCAL_DATASET_ROOT_DIR

Local directory to download files to.


Path to a directory, or a Path object.

GCS_PROJECT_ID

Google Cloud project ID.


GCS_BUCKET_NAME

Google Cloud Storage bucket name.


Last modified: 27 December 2023
\ No newline at end of file +

Configuration

If you need granular control over

  • where the files are downloaded

  • the credentials used

  • the project used

  • the bucket used

Then edit conf.py.

GCS_CREDENTIALS

Google Cloud credentials.


A google.oauth2.service_account.Credentials object. See the object documentation for more information.

LOCAL_DATASET_ROOT_DIR

Local directory to download files to.


Path to a directory, or a Path object.

GCS_PROJECT_ID

Google Cloud project ID.


GCS_BUCKET_NAME

Google Cloud Storage bucket name.
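For illustration only, overriding these constants in conf.py might look like the sketch below; the values are placeholders, and the exact way conf.py assigns them may differ.

from pathlib import Path
from google.oauth2.service_account import Credentials

GCS_CREDENTIALS = Credentials.from_service_account_file("service-account.json")  # placeholder key file
LOCAL_DATASET_ROOT_DIR = Path("rsc/")  # where downloaded files land
GCS_PROJECT_ID = "my-gcp-project"      # placeholder project ID
GCS_BUCKET_NAME = "frdc-ds"            # default bucket per this page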


Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/mix-match-module.html b/docs/mix-match-module.html index e2512d44..9ef02365 100644 --- a/docs/mix-match-module.html +++ b/docs/mix-match-module.html @@ -1,4 +1,4 @@ - MixMatch Module | Documentation

Documentation 0.0.8 Help

MixMatch Module

See frdc/train/mixmatch_module.py.

Quick Recap

We will go over the essential parts of the code here. Before that, we revise some of the concepts that are used in the code.

Abstract Methods

In Python, we can define abstract methods using the abc module. Just like other OOP languages, abstract methods are methods that must be implemented by the child class.

For example:

+ MixMatch Module | Documentation

Documentation 0.0.8 Help

MixMatch Module

See frdc/train/mixmatch_module.py.

Quick Recap

We will go over the essential parts of the code here. Before that, we revise some of the concepts that are used in the code.

Abstract Methods

In Python, we can define abstract methods using the abc module. Just like other OOP languages, abstract methods are methods that must be implemented by the child class.

For example:

from abc import ABC, abstractmethod @@ -11,7 +11,7 @@ class MyChildClass(MyAbstractClass): def my_abstract_method(self): print("Hello World!") -

nn.Module & LightningModule

If you're unfamiliar with PyTorch, you should read the nn.Module Documentation.

nn.Module is the base class for all neural network modules in PyTorch, while LightningModule is a PyTorch Lightning class that extends nn.Module, providing additional functionality that reduces boilerplate code.

By implementing it as a LightningModule, we also enter the PyTorch Lightning ecosystem, which provides us with a lot of useful features such as logging, early stopping, and more.

What do we implement in a Module?

One key component that nn.Module requires is the model. So for example:

+

nn.Module & LightningModule

If you're unfamiliar with PyTorch, you should read the nn.Module Documentation.

nn.Module is the base class for all neural network modules in PyTorch, while LightningModule is a PyTorch Lightning class that extends nn.Module, providing additional functionality that reduces boilerplate code.

By implementing it as a LightningModule, we also enter the PyTorch Lightning ecosystem, which provides us with a lot of useful features such as logging, early stopping, and more.

What do we implement in a Module?

One key component that nn.Module requires is the model. So for example:

class MyModule(nn.Module): def __init__(self): super().__init__() @@ -23,7 +23,7 @@ def forward(self, x): return self.model(x) -

PyTorch Lightning builds on top of it, requiring training_step and validation_step. Each "step" is a batch of data, and the model is trained on it. So for example:

+

PyTorch Lightning builds on top of it, requiring training_step and validation_step. Each "step" is a batch of data, and the model is trained on it. So for example:

class MyModule(LightningModule): def __init__(self): ... @@ -40,7 +40,7 @@ y_hat = self(x) loss = F.cross_entropy(y_hat, y) return loss -

Usually, the training and validation steps are the same, but in some cases, such as MixMatch, they are different. In MixMatch, we not only use a different loss function for train, we also handle a batch differently. The PyTorch Lightning framework allows us to separate the two, and implement them separately.

Model Embedded Preprocessing on_before_batch_transfer

In PyTorch Lightning, we can also inject a step before the batch is passed to the model. This is done by overriding the on_before_batch_transfer method.

Batch
on_before_batch_transfer
training_step
validation_step

This allows us to do preprocessing on the batch, such as scaling the data, encoding the labels, and more.

Custom EMA Update on_after_backward

We also leverage another hook, called on_after_backward. This hook is called after the backward pass, and allows us to do custom operations. In our case, we use it to update the EMA model.

Batch
training_step
on_after_backward
update_ema

MixMatch

We recommend having tests/model_tests/chestnut_dec_may/train.py open while reading this section. It implements a real-world example of MixMatch.

As a summary:

  1. We learned what an abstract method is, and how to implement it

  2. We implement the model in LightningModule much like we would in nn.Module

  3. We implement on_before_batch_transfer to preprocess the batch

  4. Finally, we implement on_after_backward to update the EMA model

With the above in mind, let's look at the MixMatch implementation.

forward (abstract)

Forward pass of the model

ema_model (abstract)

The model that is used for EMA. We expect this property to be implemented by the child class.

update_ema (abstract)

The method to update the EMA model. We expect this method to be implemented by the child class.

loss_unl_scaler (static)

Takes in the current progress of the training, 0.0 to 1.0, where 0.0 is the start of the training, and 1.0 is the end. Then, returns the multiplier for the unlabeled loss.

loss_lbl (static)

Implements the loss for labeled data. Takes in the predicted labels and the ground truth labels, and returns the loss. This is cross entropy for MixMatch.

loss_unl (static)

Implements the loss for unlabeled data. Takes in the predicted labels and the guessed (pseudo) labels, and returns the loss. This is MSE for MixMatch.

mixup

Takes in the data and the labels, the beta distribution parameter, and returns the mixed data and labels.

sharpen

Takes in the labels and temperature, and returns the sharpened labels.

guess_labels

Takes in the unlabeled data, and returns the guessed labels.

progress

The current progress of the training, 0.0 to 1.0, where 0.0 is the start of the training, and 1.0 is the end.

training_step

The training step runs through 1 batch of data, and returns the loss. Note that this is significantly different from validation step, as we handle the K-Augmented data differently.

test / validation_step

The test / validation step runs through 1 batch of data, and returns the loss.

predict_step

The predict step runs through 1 batch of data, and returns the actual decoded labels.

on_after_backward

The on_after_backward hook is called after the backward pass, and allows us to do custom operations. In our case, we use it to update the EMA model.

on_before_batch_transfer

The on_before_batch_transfer hook is called before the batch is transferred to the GPU. In our case, we use it to preprocess the batch.

A diagram of how these components interact with each other is shown below:

Batch
on_before_batch_transfer
training_step
guess_labels
sharpen
mix_up
loss_unl
loss_unl_scaler
loss
loss_lbl
backward
on_after_backward
update_ema
validation_step
loss

Finally, we show an example of how to use the MixMatch module:

+

Usually, the training and validation steps are the same, but in some cases, such as MixMatch, they are different. In MixMatch, we not only use a different loss function for train, we also handle a batch differently. The PyTorch Lightning framework allows us to separate the two, and implement them separately.

Model Embedded Preprocessing on_before_batch_transfer

In PyTorch Lightning, we can also inject a step before the batch is passed to the model. This is done by overriding the on_before_batch_transfer method.

Batch
on_before_batch_transfer
training_step
validation_step

This allows us to do preprocessing on the batch, such as scaling the data, encoding the labels, and more.
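As an illustration (a minimal sketch, not the actual MixMatchModule code), overriding this hook in any LightningModule looks roughly like this; the scaling shown is a stand-in for whatever preprocessing you need.

from lightning.pytorch import LightningModule  # or pytorch_lightning, depending on your install

class MyModule(LightningModule):
    ...

    def on_before_batch_transfer(self, batch, dataloader_idx):
        # Runs before the batch reaches training_step / validation_step.
        x, y = batch
        x = (x - x.mean()) / (x.std() + 1e-8)  # stand-in preprocessing
        return x, y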

Custom EMA Update on_after_backward

We also leverage another hook, called on_after_backward. This hook is called after the backward pass, and allows us to do custom operations. In our case, we use it to update the EMA model.

Batch
training_step
on_after_backward
update_ema
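Extending the sketch above, the hook itself boils down to calling the EMA update; the real implementation may do more.

class MyModule(LightningModule):
    ...

    def on_after_backward(self):
        # Called by Lightning right after loss.backward();
        # nudge the EMA weights towards the current weights.
        self.update_ema()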

MixMatch

We recommend having tests/model_tests/chestnut_dec_may/train.py open while reading this section. It implements a real-world example of MixMatch.

As a summary:

  1. We learned what an abstract method is, and how to implement it

  2. We implement the model in LightningModule much like we would in nn.Module

  3. We implement on_before_batch_transfer to preprocess the batch

  4. Finally, we implement on_after_backward to update the EMA model

With the above in mind, let's look at the MixMatch implementation.

forward (abstract)

Forward pass of the model

ema_model (abstract)

The model that is used for EMA. We expect this property to be implemented by the child class.

update_ema (abstract)

The method to update the EMA model. We expect this method to be implemented by the child class.
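For instance, a child class might satisfy these three abstract members roughly as sketched below; the network, attribute names, and decay value are made up for illustration and are not the repository's actual implementation.

from copy import deepcopy

import torch.nn as nn

from frdc.train.mixmatch_module import MixMatchModule


class MyMixMatch(MixMatchModule):  # hypothetical child class
    def __init__(self, **kwargs):
        super().__init__(**kwargs)  # forward the MixMatchModule arguments
        self.model = nn.Sequential(nn.Flatten(), nn.LazyLinear(10))  # stand-in network
        self._ema = deepcopy(self.model)
        for p in self._ema.parameters():
            p.requires_grad_(False)  # EMA weights are not trained directly

    def forward(self, x):
        return self.model(x)

    @property
    def ema_model(self):
        return self._ema

    def update_ema(self, decay: float = 0.999):  # decay value is an assumption
        # Exponential moving average of the online model's weights.
        for ema_p, p in zip(self._ema.parameters(), self.model.parameters()):
            ema_p.data.mul_(decay).add_(p.data, alpha=1 - decay)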

loss_unl_scaler (static)

Takes in the current progress of the training, 0.0 to 1.0, where 0.0 is the start of the training, and 1.0 is the end. Then, returns the multiplier for the unlabeled loss.

loss_lbl (static)

Implements the loss for labeled data. Takes in the predicted labels and the ground truth labels, and returns the loss. This is cross entropy for MixMatch.

loss_unl (static)

Implements the loss for unlabeled data. Takes in the predicted labels and the guessed (pseudo) labels, and returns the loss. This is MSE for MixMatch.

mixup

Takes in the data and the labels, the beta distribution parameter, and returns the mixed data and labels.

sharpen

Takes in the labels and temperature, and returns the sharpened labels.
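Conceptually, sharpening is just temperature scaling of the guessed label distribution; a standalone sketch follows (not the module's actual method signature).

import torch

def sharpen(p: torch.Tensor, temp: float) -> torch.Tensor:
    # Raise each probability to 1/T and renormalise; T < 1 makes the
    # distribution peakier, which is what MixMatch wants.
    p = p ** (1 / temp)
    return p / p.sum(dim=-1, keepdim=True)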

guess_labels

Takes in the unlabeled data, and returns the guessed labels.

progress

The current progress of the training, 0.0 to 1.0, where 0.0 is the start of the training, and 1.0 is the end.

training_step

The training step runs through 1 batch of data, and returns the loss. Note that this is significantly different from validation step, as we handle the K-Augmented data differently.

test / validation_step

The test / validation step runs through 1 batch of data, and returns the loss.

predict_step

The predict step runs through 1 batch of data, and returns the actual decoded labels.

on_after_backward

The on_after_backward hook is called after the backward pass, and allows us to do custom operations. In our case, we use it to update the EMA model.

on_before_batch_transfer

The on_before_batch_transfer hook is called before the batch is transferred to the GPU. In our case, we use it to preprocess the batch.

A diagram of how these components interact with each other is shown below:

Batch
on_before_batch_transfer
training_step
guess_labels
sharpen
mix_up
loss_unl
loss_unl_scaler
loss
loss_lbl
backward
on_after_backward
update_ema
validation_step
loss

Finally, we show an example of how to use the MixMatch module:

from sklearn.preprocessing import StandardScaler, OrdinalEncoder from frdc.train.mixmatch_module import MixMatchModule @@ -60,7 +60,7 @@ sharpen_temp=0.5, mix_beta_alpha=0.75, ) -

In particular, we need to supply some transformations for the preprocessing step. In this case, we use StandardScaler to scale the data, and OrdinalEncoder to encode the labels.

  1. It's best if standardization is done only on the training data, and not the validation data to better fit real-world scenarios.

  2. We use OrdinalEncoder as it handles unseen labels. So if a class doesn't show up in the training data, it will be encoded as np.nan, and will not participate in the loss calculation.

Design Choices

Static Method Overriding

We implement many functions as static, as we believe that a functional style reduces dependencies, thus making the code easier to test and debug.

Furthermore, it allows the subclasses to easily override the functions, to customize the behavior of the MixMatch module.

For example, the loss_unl_scaler function is static, thus, we can implement our own scaling function, and pass it to the MixMatch module.

+

In particular, we need to supply some transformations for the preprocessing step. In this case, we use StandardScaler to scale the data, and OrdinalEncoder to encode the labels.

  1. It's best if standardization is done only on the training data, and not the validation data to better fit real-world scenarios.

  2. We use OrdinalEncoder as it handles unseen labels. So if a class doesn't show up in the training data, it will be encoded as np.nan, and will not participate in the loss calculation.
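For example, this is standard scikit-learn behaviour (the species names are made up):

import numpy as np
from sklearn.preprocessing import OrdinalEncoder

oe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan)
oe.fit([["Oak"], ["Teak"]])
print(oe.transform([["Oak"], ["Mahogany"]]))  # [[0.], [nan]] -- unseen label becomes NaN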

Design Choices

Static Method Overriding

We implement many functions as static, as we believe that a functional style reduces dependencies, thus making the code easier to test and debug.

Furthermore, it allows the subclasses to easily override the functions, to customize the behavior of the MixMatch module.

For example, the loss_unl_scaler function is static, thus, we can implement our own scaling function, and pass it to the MixMatch module.

def my_loss_unl_scaler(progress: float) -> float: return progress ** 2 @@ -68,4 +68,4 @@ @staticmethod def loss_unl_scaler(progress: float) -> float: return my_loss_unl_scaler(progress) -

If we had used a method instead, we would have to consider instance state, which would make it harder to override.

Why not use Dataclasses?

One of the biggest caveats of nn.Module is that it requires super().__init__() to be called before anything is assigned. While dataclass can leverage __post_init__ to do the same, we felt that this was too much of a hassle to save a few keystrokes. Thus, we opted to use __init__ instead; while more verbose, it is more explicit.

Why use PyTorch Lightning?

While we did hit some roadblocks implementing SSL, due to its complex and unconventional nature, we felt that the benefits of using PyTorch Lightning outweighed the cons.

on_before_batch_transfer and on_after_backward are unconventional hooks, and we had to do some digging to find them. It can be argued that by just writing explicit code, we can avoid the need for these hooks, but the PyTorch ecosystem fixes many other issues, so we turned a blind eye to this.

References

Last modified: 27 December 2023
\ No newline at end of file +

If we had used a method instead, we would have to consider instance state, which would make it harder to override.

Why not use Dataclasses?

One of the biggest caveats of nn.Module is that it requires super().__init__() to be called before anything is assigned. While dataclass can leverage __post_init__ to do the same, we felt that this was too much of a hassle to save a few keystrokes. Thus, we opted to use __init__ instead; while more verbose, it is more explicit.

Why use PyTorch Lightning?

While we did hit some roadblocks implementing SSL, due to its complex and unconventional nature, we felt that the benefits of using PyTorch Lightning outweighed the cons.

on_before_batch_transfer and on_after_backward are unconventional hooks, and we had to do some digging to find them. It can be argued that by just writing explicit code, we can avoid the need for these hooks, but the PyTorch ecosystem fixes many other issues, so we turned a blind eye to this.

References

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/mix-match.html b/docs/mix-match.html index 5d0f4795..80686148 100644 --- a/docs/mix-match.html +++ b/docs/mix-match.html @@ -1 +1 @@ - MixMatch | Documentation

Documentation 0.0.8 Help

MixMatch

In FRDC-ML, we leverage semi-supervised learning to improve the model's performance through better augmentation consistency and by making use of unlabelled data.

The algorithm we use is MixMatch, a state-of-the-art semi-supervised learning algorithm. It is based on the idea of consistency regularization, which encourages models to predict the same class even after augmentations that occur naturally in the real world.

Our implementation of MixMatch is a refactored version of YU1ut/MixMatch-pytorch. We've refactored the code to follow more modern PyTorch practices, allowing us to utilize it with modern PyTorch frameworks such as PyTorch Lightning.

We won't go through the details of MixMatch here, see Our Documentation in our MixMatch-PyTorch-CIFAR10 repository for more details.

Implementation Details

  1. How we implemented the MixMatch logic MixMatchModule

  2. How we implemented the unique MixMatch data loading logic Custom MixMatch Data Loading

References

Last modified: 27 December 2023
\ No newline at end of file + MixMatch | Documentation

Documentation 0.0.8 Help

MixMatch

In FRDC-ML, we leverage semi-supervised learning to improve the model's performance through better augmentation consistency and by making use of unlabelled data.

The algorithm we use is MixMatch, a state-of-the-art semi-supervised learning algorithm. It is based on the idea of consistency regularization, which encourages models to predict the same class even after augmentations that occur naturally in the real world.

Our implementation of MixMatch is a refactored version of YU1ut/MixMatch-pytorch. We've refactored the code to follow more modern PyTorch practices, allowing us to utilize it with modern PyTorch frameworks such as PyTorch Lightning.

We won't go through the details of MixMatch here, see Our Documentation in our MixMatch-PyTorch-CIFAR10 repository for more details.

Implementation Details

  1. How we implemented the MixMatch logic MixMatchModule

  2. How we implemented the unique MixMatch data loading logic Custom MixMatch Data Loading

References

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/model-test-chestnut-may-dec.html b/docs/model-test-chestnut-may-dec.html index 91b6538a..245427b2 100644 --- a/docs/model-test-chestnut-may-dec.html +++ b/docs/model-test-chestnut-may-dec.html @@ -1 +1 @@ - Model Test Chestnut May-Dec | Documentation

Documentation 0.0.8 Help

Model Test Chestnut May-Dec

This test is used to evaluate the model performance on the Chestnut Nature Park May & December dataset.

See this script in model_tests/chestnut_dec_may/train.py.

Motivation

The usage of this model will be to classify trees in unseen datasets under different conditions. In this test, we'll evaluate it under a different season.

A caveat is that it'll be evaluated on the same set of trees, so it's not representative of a field test. However, given the difficulties of yielding datasets, this still gives us a good preliminary idea of how the model will perform in different conditions.

Methodology

We train on the December dataset, and test on the May dataset.

Labelled Train
Unlabelled Train
Test
DecDataset
Model
MayDataset

Despite not having any true unlabelled data, we use MixMatch by treating the labelled data of the December dataset as unlabelled data.

Model

The current Model used is a simple InceptionV3 Transfer Learning model, with the last layer replaced with a fully connected layer(s).

SSL Loss
Input
InceptionV3 Frozen
FC Layer(s)
Softmax
Output

Preprocessing

For Training:

Segment
RandomCrop 299
Horizontal Flip 50%
Vertical Flip 50%
Normalize By Training Mean & Std

For Validation:

Segment
CenterCrop 299
Normalize By Training Mean & Std

For Evaluation:

Segment
CenterCrop 299
Normalize By Training Mean & Std
As Is
Horizontal Flip
Vertical Flip
Horizontal & Vertical Flip

For evaluation, we check that the model is invariant to horizontal and vertical flips, evaluating on the original image as well as its flipped versions.

Hyperparameters

The following hyperparameters are used:

  • Optimizer: Adam

  • Learning Rate: 1e-3

  • Batch Size: 32

  • Epochs: 10

  • Train Iterations: 25~100

  • Validation Iterations: 10~25

  • Early Stopping: 4

Results

We evaluate around 40% accuracy on the test set, compared to 100% for the training set. This indicates that the model has saturated and is not able to learn anymore from the training set. There's no indication of overfitting as the validation loss just plateaus.

W&B Dashboard

Caveats

  • The test set is very small, so the results are not very representative.

  • The test set is the same set of trees, so it's not a true test of the model performance in different conditions.

  • There are many classes with 1 sample, so the model may not be able to learn the features of these classes well.

Last modified: 27 December 2023
\ No newline at end of file + Model Test Chestnut May-Dec | Documentation

Documentation 0.0.8 Help

Model Test Chestnut May-Dec

This test is used to evaluate the model performance on the Chestnut Nature Park May & December dataset.

See this script in model_tests/chestnut_dec_may/train.py.

Motivation

The usage of this model will be to classify trees in unseen datasets under different conditions. In this test, we'll evaluate it under a different season.

A caveat is that it'll be evaluated on the same set of trees, so it's not representative of a field test. However, given the difficulties of yielding datasets, this still gives us a good preliminary idea of how the model will perform in different conditions.

Methodology

We train on the December dataset, and test on the May dataset.

Labelled Train
Unlabelled Train
Test
DecDataset
Model
MayDataset

Despite not having any true unlabelled data, we use MixMatch by treating the labelled data of the December dataset as unlabelled data.

Model

The current Model used is a simple InceptionV3 Transfer Learning model, with the last layer replaced with a fully connected layer(s).

SSL Loss
Input
InceptionV3 Frozen
FC Layer(s)
Softmax
Output

Preprocessing

For Training:

Segment
RandomCrop 299
Horizontal Flip 50%
Vertical Flip 50%
Normalize By Training Mean & Std

For Validation:

Segment
CenterCrop 299
Normalize By Training Mean & Std

For Evaluation:

Segment
CenterCrop 299
Normalize By Training Mean & Std
As Is
Horizontal Flip
Vertical Flip
Horizontal & Vertical Flip

For evaluation, we check that the model is invariant to horizontal and vertical flips, evaluating on the original image as well as its flipped versions.

Hyperparameters

The following hyperparameters are used:

  • Optimizer: Adam

  • Learning Rate: 1e-3

  • Batch Size: 32

  • Epochs: 10

  • Train Iterations: 25~100

  • Validation Iterations: 10~25

  • Early Stopping: 4

Results

We evaluate around 40% accuracy on the test set, compared to 100% for the training set. This indicates that the model has saturated and is not able to learn anymore from the training set. There's no indication of overfitting as the validation loss just plateaus.

W&B Dashboard

Caveats

  • The test set is very small, so the results are not very representative.

  • The test set is the same set of trees, so it's not a true test of the model performance in different conditions.

  • There are many classes with 1 sample, so the model may not be able to learn the features of these classes well.

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/overview.html b/docs/overview.html index 627976e2..896ab236 100644 --- a/docs/overview.html +++ b/docs/overview.html @@ -1 +1 @@ - Overview | Documentation

Documentation 0.0.8 Help

Overview

Forest Recovery Digital Companion (FRDC) is an ML-assisted companion for ecologists to automatically classify surveyed trees via an Unmanned Aerial Vehicle (UAV).

This package, FRDC-ML is the Machine Learning backbone of this project, a centralized repository of tools and model architectures to be used in the FRDC pipeline.

Get started here

Other Projects

FRDC-UI

The User Interface Repository for FRDC, a WebApp GUI for ecologists to adjust annotations.

Last modified: 27 December 2023
\ No newline at end of file + Overview | Documentation

Documentation 0.0.8 Help

Overview

Forest Recovery Digital Companion (FRDC) is an ML-assisted companion for ecologists to automatically classify surveyed trees via an Unmanned Aerial Vehicle (UAV).

This package, FRDC-ML is the Machine Learning backbone of this project, a centralized repository of tools and model architectures to be used in the FRDC pipeline.

Get started here

Other Projects

FRDC-UI

The User Interface Repository for FRDC, a WebApp GUI for ecologists to adjust annotations.

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/preprocessing-extract-segments.html b/docs/preprocessing-extract-segments.html index 03e0ae62..9a248812 100644 --- a/docs/preprocessing-extract-segments.html +++ b/docs/preprocessing-extract-segments.html @@ -1,4 +1,4 @@ - preprocessing.extract_segments | Documentation

Documentation 0.0.8 Help

preprocessing.extract_segments

Functions

extract_segments_from_labels

Extracts segments from a label classification.

extract_segments_from_bounds

Extracts segments from Rect bounds.

remove_small_segments_from_labels

Removes small segments from a label classification.

Extract with Boundaries

A boundary is a Rect object that represents the minimum bounding box of a segment, with x0, y0, x1, y1 coordinates.

It simply slices the original image to the bounding box. The origin is the top left corner of the image.

+ preprocessing.extract_segments | Documentation

Documentation 0.0.8 Help

preprocessing.extract_segments

Functions

extract_segments_from_labels

Extracts segments from a label classification.

extract_segments_from_bounds

Extracts segments from Rect bounds.

remove_small_segments_from_labels

Removes small segments from a label classification.

Extract with Boundaries

A boundary is a Rect object that represents the minimum bounding box of a segment, with x0, y0, x1, y1 coordinates.

It simply slices the original image to the bounding box. The origin is the top left corner of the image.

+-----------------+ +-----------+ | Original | | Segmented | | Image | | Image | @@ -9,7 +9,7 @@ +-----+-----+-----+ 1, 2, 0, 2 +-----+-----+ | 7 | 8 | 9 | x0 y0 x1 y1 | 8 | 9 | +-----+-----+-----+ +-----+-----+ -
+
+-----------------+ +-----------------+ | Original | | Segmented | | Image | | Image | @@ -20,7 +20,7 @@ +-----+-----+-----+ 1, 2, 0, 2 +-----+-----+-----+ | 7 | 8 | 9 | x0 y0 x1 y1 | 0 | 8 | 9 | +-----+-----+-----+ +-----+-----+-----+ -

Extract with Labels

A label classification is a np.ndarray where each pixel is mapped to a segment. The segments are mapped to a unique integer. In our project, the 0th label is the background.

For example, a label classification of 3 segments will look like this:

+

Extract with Labels

A label classification is a np.ndarray where each pixel is mapped to a segment. The segments are mapped to a unique integer. In our project, the 0th label is the background.

For example, a label classification of 3 segments will look like this:

+-----------------+ +-----------------+ | Label | | Original | | Classification | | Image | @@ -31,7 +31,7 @@ +-----+-----+-----+ +-----+-----+-----+ | 1 | 1 | 0 | | 7 | 8 | 9 | +-----+-----+-----+ +-----+-----+-----+ -

The extraction will take the minimum bounding box of each segment and return a list of segments.

For example, the label 1 and 2 extracted images will be

+

The extraction will take the minimum bounding box of each segment and return a list of segments.

For example, the extracted images for labels 1 and 2 will be

+-----------+ +-----------+ | Extracted | | Extracted | | Segment 1 | | Segment 2 | @@ -42,7 +42,7 @@ +-----+-----+ +-----+-----+ | 7 | 8 | +-----+-----+ -
+
+-----------------+ +-----------------+ | Extracted | | Extracted | | Segment 1 | | Segment 2 | @@ -53,23 +53,21 @@ +-----+-----+-----+ +-----+-----+-----+ | 7 | 8 | 0 | | 0 | 0 | 0 | +-----+-----+-----+ +-----+-----+-----+ -
  • If cropped is False, the segments are padded with 0s to the original image size. While this can ensure shape consistency, it can consume more memory for large images.

  • If cropped is True, the segments are cropped to the minimum bounding box. This can save memory, but the shape of the segments will be inconsistent.

Usage

Extract from Bounds and Labels

Extract segments from bounds and labels.

+
  • If cropped is False, the segments are padded with 0s to the original image size. While this can ensure shape consistency, it can consume more memory for large images.

  • If cropped is True, the segments are cropped to the minimum bounding box. This can save memory, but the shape of the segments will be inconsistent.
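A minimal sketch of that trade-off, using the cropped argument listed in the API section below (the dataset preset mirrors the Usage example that follows):

from frdc.load.preset import FRDCDatasetPreset
from frdc.preprocess.extract_segments import extract_segments_from_bounds

ds = FRDCDatasetPreset.chestnut_20201218()
ar, order = ds.get_ar_bands()
bounds, labels = ds.get_bounds_and_labels()

# Padded to the original image size: consistent shapes, more memory.
segments_padded = extract_segments_from_bounds(ar, bounds, cropped=False)

# Cropped to each minimum bounding box: less memory, inconsistent shapes.
segments_cropped = extract_segments_from_bounds(ar, bounds, cropped=True)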

Usage

Extract from Bounds and Labels

Extract segments from bounds and labels.

 import numpy as np
-from frdc.load import FRDCDataset
+from frdc.load.preset import FRDCDatasetPreset
 from frdc.preprocess.extract_segments import extract_segments_from_bounds

-ds = FRDCDataset(site='chestnut_nature_park',
-                 date='20201218',
-                 version=None, )
+ds = FRDCDatasetPreset.chestnut_20201218()
 ar, order = ds.get_ar_bands()
 bounds, labels = ds.get_bounds_and_labels()
 segments: list[np.ndarray] = extract_segments_from_bounds(ar, bounds)

Extract from Auto-Segmentation

Extract segments from a label classification.

+

Extract from Auto-Segmentation

Extract segments from a label classification.

 from skimage.morphology import remove_small_objects, remove_small_holes
 import numpy as np
-from frdc.load import FRDCDataset
+from frdc.load.preset import FRDCDatasetPreset
 from frdc.preprocess.morphology import (
     threshold_binary_mask, binary_watershed
 )
@@ -78,9 +76,7 @@
     extract_segments_from_labels, remove_small_segments_from_labels
 )
-ds = FRDCDataset(site='chestnut_nature_park',
-                 date='20201218',
-                 version=None, )
+ds = FRDCDatasetPreset.chestnut_20201218()
 ar, order = ds.get_ar_bands()
 ar = scale_0_1_per_band(ar)
 ar_mask = threshold_binary_mask(ar, -1, 90 / 256)
@@ -91,4 +87,4 @@
                                          min_height=10, min_width=10)
 segments: list[np.ndarray] = extract_segments_from_labels(ar, ar_labels)

API

extract_segments_from_labels(ar, ar_labels, cropped)

Extracts segments from a label classification.


ar_labels is a label classification as a np.ndarray

extract_segments_from_bounds(ar, bounds, cropped)

Extracts segments from Rect bounds.


bounds is a list of Rect bounds.

remove_small_segments_from_labels(ar_labels, min_height, min_width)

Removes small segments from a label classification.


Last modified: 27 December 2023
\ No newline at end of file +

API

extract_segments_from_labels(ar, ar_labels, cropped)

Extracts segments from a label classification.


ar_labels is a label classification as a np.ndarray

extract_segments_from_bounds(ar, bounds, cropped)

Extracts segments from Rect bounds.


bounds is a list of Rect bounds.

remove_small_segments_from_labels(ar_labels, min_height, min_width)

Removes small segments from a label classification.


Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/preprocessing-glcm-padded.html b/docs/preprocessing-glcm-padded.html index 0f99613f..f9e3da4c 100644 --- a/docs/preprocessing-glcm-padded.html +++ b/docs/preprocessing-glcm-padded.html @@ -1,4 +1,4 @@ - preprocessing.glcm_padded | Documentation

Documentation 0.0.8 Help

preprocessing.glcm_padded

Functions

glcm_padded

Computes the GLCM of the NDArray bands with padding.

glcm_padded_cached

Computes the GLCM of the NDArray bands with padding, and caches it.

append_glcm_padded_cached

Computes the GLCM of the NDArray bands with padding, caches it, and appends it onto the original array.

Usage

We show a few examples of how to use the GLCM functions.

+ preprocessing.glcm_padded | Documentation

Documentation 0.0.8 Help

preprocessing.glcm_padded

Functions

glcm_padded

Computes the GLCM of the NDArray bands with padding.

glcm_padded_cached

Computes the GLCM of the NDArray bands with padding, and caches it.

append_glcm_padded_cached

Computes the GLCM of the NDArray bands with padding, caches it, and appends it onto the original array.

Usage

We show a few examples of how to use the GLCM functions.

 import numpy as np
 from glcm_cupy import Features
@@ -23,4 +23,4 @@
 ar_glcm_cached_appended = append_glcm_padded_cached(ar,
                                                     bin_from=1, bin_to=4,
                                                     radius=3)
  • ar_glcm is the GLCM of the original array, with the last dimension being the GLCM features. The number of features is determined by the features parameter, which defaults to all features.

  • ar_glcm_2_features selects only 2 features, with the last dimension being the 2 GLCM features specified.

  • ar_glcm_cached caches the GLCM so that if you call it again, it will return the cached version. It stores its data at the project root dir, under .cache/.

  • ar_glcm_cached_appended is a wrapper around ar_glcm_cached: it appends the GLCM features onto the original array. It's equivalent to calling ar_glcm_cached and then np.concatenate on the final axis.

Caching

GLCM is an expensive operation, so we recommend caching it if the input parameters will be the same. This is especially useful if you're experimenting with the same dataset using constant parameters.

API

glcm_padded(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding.


  • ar is the input array

  • bin_from is the upper bound of the input

  • bin_to is the upper bound of the GLCM input, i.e. the resolution that GLCM operates on

  • radius is the radius of the GLCM

  • step_size is the step size of the GLCM

  • features is the list of GLCM features to compute

The return shape is (H, W, C, F), where F is the number of GLCM features computed.

See glcm_cupy for the GLCM Features.

glcm_padded_cached(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding, and caches it.


See glcm_padded for the parameters and output shape

append_glcm_padded_cached(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding, caches it, and appends it onto the original array.


See glcm_padded for the parameters


The return shape is (H, W, C × (1 + F)).

The function automatically flattens the last 2 dimensions of the GLCM features, and appends it onto the original array.

Last modified: 27 December 2023
\ No newline at end of file +
  • ar_glcm is the GLCM of the original array, with the last dimension being the GLCM features. The number of features is determined by the features parameter, which defaults to all features.

  • ar_glcm_2_features selects only 2 features, with the last dimension being the 2 GLCM features specified.

  • ar_glcm_cached caches the GLCM so that if you call it again, it will return the cached version. It stores its data at the project root dir, under .cache/.

  • ar_glcm_cached_appended is a wrapper around ar_glcm_cached: it appends the GLCM features onto the original array. It's equivalent to calling ar_glcm_cached and then np.concatenate on the final axis.

Caching

GLCM is an expensive operation, so we recommend caching it if the input parameters will be the same. This is especially useful if you're experimenting with the same dataset using constant parameters.
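As a sketch (the frdc.preprocess.glcm_padded import path is assumed from this page's name, and the array is random data purely for illustration):

import numpy as np
from frdc.preprocess.glcm_padded import glcm_padded_cached

ar = np.random.rand(32, 32, 3)

# First call computes the GLCM and stores it under .cache/.
ar_glcm = glcm_padded_cached(ar, bin_from=1, bin_to=4, radius=3)

# Same input and parameters: this call returns the cached result instead of recomputing.
ar_glcm_again = glcm_padded_cached(ar, bin_from=1, bin_to=4, radius=3)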

API

glcm_padded(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding.


  • ar is the input array

  • bin_from is the upper bound of the input

  • bin_to is the upper bound of the GLCM input, i.e. the resolution that GLCM operates on

  • radius is the radius of the GLCM

  • step_size is the step size of the GLCM

  • features is the list of GLCM features to compute

The return shape is (H, W, C, F), where F is the number of GLCM features computed.

See glcm_cupy for the GLCM Features.

glcm_padded_cached(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding, and caches it.


See glcm_padded for the parameters and output shape

append_glcm_padded_cached(ar, bin_from, bin_to, radius, step_size, features)

Computes the GLCM of the NDArray bands with padding, caches it, and appends it onto the original array.


See glcm_padded for the parameters


The return shape is (H, W, C × (1 + F)).

The function automatically flattens the last 2 dimensions of the GLCM features, and appends it onto the original array.
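A small shape sketch of the two return conventions above, under the same assumptions (import path inferred from the page name, random data for illustration, F being the number of GLCM features):

import numpy as np
from frdc.preprocess.glcm_padded import glcm_padded, append_glcm_padded_cached

ar = np.random.rand(32, 32, 3)                                   # (H, W, C)

ar_glcm = glcm_padded(ar, bin_from=1, bin_to=4, radius=3)        # (H, W, C, F)
ar_app = append_glcm_padded_cached(ar, bin_from=1, bin_to=4,
                                   radius=3)                     # (H, W, C * (1 + F))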

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/preprocessing-morphology.html b/docs/preprocessing-morphology.html index 7817824f..0c21fd83 100644 --- a/docs/preprocessing-morphology.html +++ b/docs/preprocessing-morphology.html @@ -1,15 +1,13 @@ - preprocessing.morphology | Documentation

Documentation 0.0.8 Help

preprocessing.morphology

Functions

threshold_binary_mask

Thresholds a selected NDArray band to yield a binary mask.

binary_watershed

Performs watershed on a binary mask to yield a mapped label classification

Usage

Perform auto-segmentation on a dataset to yield a label classification.

-from frdc.load import FRDCDataset + preprocessing.morphology | Documentation

Documentation 0.0.8 Help

preprocessing.morphology

Functions

threshold_binary_mask

Thresholds a selected NDArray band to yield a binary mask.

binary_watershed

Performs watershed on a binary mask to yield a mapped label classification

Usage

Perform auto-segmentation on a dataset to yield a label classification.

+from frdc.load.preset import FRDCDatasetPreset
 from frdc.preprocess.morphology import (
     threshold_binary_mask, binary_watershed
 )

-ds = FRDCDataset(site='chestnut_nature_park',
-                 date='20201218',
-                 version=None, )
+ds = FRDCDatasetPreset.chestnut_20201218()
 ar, order = ds.get_ar_bands()
 mask = threshold_binary_mask(ar, order.index('NIR'), 90 / 256)
 ar_label = binary_watershed(mask)

API

threshold_binary_mask(ar, band_idx, threshold_value)

Thresholds a selected NDArray band to yield a binary mask as an np.ndarray


This is equivalent to

+

API

threshold_binary_mask(ar, band_idx, threshold_value)

Thresholds a selected NDArray band to yield a binary mask as an np.ndarray


This is equivalent to

ar[..., band_idx] > threshold_value -
binary_watershed(ar_mask, peaks_footprint, watershed_compactness)

Performs watershed on a binary mask to yield a mapped label classification as a np.ndarray


  • peaks_footprint is the footprint of skimage.feature.peak_local_max

  • watershed_compactness is the compactness of skimage.morphology.watershed

Last modified: 27 December 2023
\ No newline at end of file +
binary_watershed(ar_mask, peaks_footprint, watershed_compactness)

Performs watershed on a binary mask to yield a mapped label classification as a np.ndarray


  • peaks_footprint is the footprint of skimage.feature.peak_local_max

  • watershed_compactness is the compactness of skimage.morphology.watershed
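A hedged sketch of tuning these two parameters; the keyword names come from the signature above, but the values (and the integer footprint size) are assumptions, not project defaults:

from frdc.preprocess.morphology import binary_watershed

# Larger footprints find fewer, more widely spaced peaks (fewer segments);
# higher compactness yields more evenly shaped watershed basins.
ar_label = binary_watershed(
    mask,                        # binary mask from threshold_binary_mask above
    peaks_footprint=200,
    watershed_compactness=0.1,
)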

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/preprocessing-scale.html b/docs/preprocessing-scale.html index cdf27d34..2dd27886 100644 --- a/docs/preprocessing-scale.html +++ b/docs/preprocessing-scale.html @@ -1,15 +1,13 @@ - preprocessing.scale | Documentation

Documentation 0.0.8 Help

preprocessing.scale

Functions

scale_0_1_per_band

Scales the NDArray bands to [0, 1] per band.

scale_normal_per_band

Scales the NDArray bands to zero mean unit variance per band.

scale_static_per_band

Scales the NDArray bands by a predefined configuration. Take a look at frdc.conf.BAND_MAX_CONFIG for an example.

Usage

-from frdc.load import FRDCDataset + preprocessing.scale | Documentation

Documentation 0.0.8 Help

preprocessing.scale

Functions

scale_0_1_per_band

Scales the NDArray bands to [0, 1] per band.

scale_normal_per_band

Scales the NDArray bands to zero mean unit variance per band.

scale_static_per_band

Scales the NDArray bands by a predefined configuration. Take a look at frdc.conf.BAND_MAX_CONFIG for an example.

Usage

+from frdc.load.preset import FRDCDatasetPreset
 from frdc.preprocess.scale import (
     scale_0_1_per_band, scale_normal_per_band, scale_static_per_band
 )
 from frdc.conf import BAND_MAX_CONFIG

-ds = FRDCDataset(site='chestnut_nature_park',
-                 date='20201218',
-                 version=None, )
+ds = FRDCDatasetPreset.chestnut_20201218()
 ar, order = ds.get_ar_bands()
 ar_01 = scale_0_1_per_band(ar)
 ar_norm = scale_normal_per_band(ar)
 ar_static = scale_static_per_band(ar, order, BAND_MAX_CONFIG)
Last modified: 27 December 2023
\ No newline at end of file +
Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/retrieve-our-datasets.html b/docs/retrieve-our-datasets.html index 34b21eb2..0f0ef2d9 100644 --- a/docs/retrieve-our-datasets.html +++ b/docs/retrieve-our-datasets.html @@ -1,10 +1,10 @@ - Retrieve our Datasets | Documentation

Documentation 0.0.8 Help

Retrieve our Datasets

In this tutorial, we'll learn how to:

  • Retrieve FRDC's Hyperspectral Image Data as np.ndarray

  • Retrieve FRDC's Ground Truth bounds and labels

  • Slice/segment the image data by the bounds

Prerequisites

  • New here? Get Started.

  • Set up the Google Cloud Authorization to download the data.

Retrieve the Data

To retrieve the data, use FRDCDataset

Here, we'll download and load our

  • ar: Hyperspectral Image Data

  • order: The order of the bands

  • bounds: The bounds of the trees (segments)

  • labels: The labels of the trees (segments)

-from frdc.load.dataset import FRDCDataset + Retrieve our Datasets | Documentation

Documentation 0.0.8 Help

Retrieve our Datasets

In this tutorial, we'll learn how to:

  • Retrieve FRDC's Hyperspectral Image Data as np.ndarray

  • Retrieve FRDC's Ground Truth bounds and labels

  • Slice/segment the image data by the bounds

Prerequisites

  • New here? Get Started.

  • Set up the Google Cloud Authorization to download the data.

Retrieve the Data

To retrieve the data, use FRDCDataset, or more simply one of the FRDCDatasetPreset entries used below

Here, we'll download and load our

  • ar: Hyperspectral Image Data

  • order: The order of the bands

  • bounds: The bounds of the trees (segments)

  • labels: The labels of the trees (segments)

+from frdc.load.preset import FRDCDatasetPreset

-ds = FRDCDataset(site="chestnut_nature_park", date="20201218", version=None)
+ds = FRDCDatasetPreset.chestnut_20201218()
 ar, order = ds.get_ar_bands()
 bounds, labels = ds.get_bounds_and_labels()

What Datasets are there?

+

What Datasets are there?

 from frdc.load.gcs import list_gcs_datasets
 print(list_gcs_datasets())
 # 0  DEBUG/0
@@ -12,22 +12,22 @@
 # 2  casuarina/20220418/93deg
 # 3  chestnut_nature_park/20201218
 # ...
  • The first part of the path is the site, and the second part is the date.

  • The version is the rest of the path; if there isn't any, use None.

  • site="ds"

  • date="date"

  • version="ver"

  • site="ds"

  • date="date"

  • version="ver/01/data"

  • site="ds"

  • date="date"

  • version=None

Segment the Data

To segment the data, use Extract Segments.

Here, we'll segment the data by the bounds.

-from frdc.load.dataset import FRDCDataset +
  • The first part of the path is the site, and the second part is the date.

  • The version is the rest of the path; if there isn't any, use None.

For example:

  • a path like ds/date/ver gives site="ds", date="date", version="ver"

  • a nested path like ds/date/ver/01/data gives site="ds", date="date", version="ver/01/data"

  • a path with no version, ds/date, gives site="ds", date="date", version=None
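The same rule, spelled out with the FRDCDataset constructor (import path as used elsewhere in these docs):

from frdc.load.dataset import FRDCDataset

# "chestnut_nature_park/20201218" has no version component, so version=None.
ds = FRDCDataset(site="chestnut_nature_park", date="20201218", version=None)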

Segment the Data

To segment the data, use Extract Segments.

Here, we'll segment the data by the bounds.

+from frdc.load.preset import FRDCDatasetPreset
 from frdc.preprocess.extract_segments import extract_segments_from_bounds

-ds = FRDCDataset(site="chestnut_nature_park", date="20201218", version=None)
+ds = FRDCDatasetPreset.chestnut_20201218()
 ar, order = ds.get_ar_bands()
 bounds, labels = ds.get_bounds_and_labels()
 segments = extract_segments_from_bounds(ar, bounds)

segments is a list of np.ndarray, each of shape (H, W, C), representing one tree. The order of segments is the same as labels, so you can use labels to identify each tree.

Plot the Data (Optional)

We can then use these data to plot out the first tree segment.

+

segments is a list of np.ndarray, each of shape (H, W, C), representing one tree. The order of segments is the same as labels, so you can use labels to identify each tree.
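For example, to pair each segment with its label:

for label, segment in zip(labels, segments):
    # Each segment is an (H, W, C) np.ndarray for the tree named by `label`.
    print(label, segment.shape)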

Plot the Data (Optional)

We can then use these data to plot out the first tree segment.

 import matplotlib.pyplot as plt
-from frdc.load.dataset import FRDCDataset
+from frdc.load.preset import FRDCDatasetPreset
 from frdc.preprocess.extract_segments import extract_segments_from_bounds
 from frdc.preprocess.scale import scale_0_1_per_band

-ds = FRDCDataset(site="chestnut_nature_park", date="20201218", version=None)
+ds = FRDCDatasetPreset.chestnut_20201218()
 ar, order = ds.get_ar_bands()
 bounds, labels = ds.get_bounds_and_labels()
 segments = extract_segments_from_bounds(ar, bounds)
@@ -38,4 +38,4 @@
 plt.imshow(segment_0_rgb_scaled)
 plt.title(f"Tree {labels[0]}")
 plt.show()

See also: preprocessing.scale.scale_0_1_per_band

Matplotlib cannot show the data correctly as-is, so we need to:

  • Convert the data from BGR to RGB

  • Scale the data to 0-1 per band

Last modified: 27 December 2023
\ No newline at end of file +

See also: preprocessing.scale.scale_0_1_per_band

Matplotlib cannot show the data correctly as-is, so we need to:

  • Convert the data from BGR to RGB

  • Scale the data to 0-1 per band

Last modified: 29 December 2023
\ No newline at end of file diff --git a/docs/site.webmanifest b/docs/site.webmanifest deleted file mode 100644 index fe6a9303..00000000 --- a/docs/site.webmanifest +++ /dev/null @@ -1,11 +0,0 @@ -{ - "name": "JetBrains", - "short_name": "JetBrains", - "icons": [ - { "src": "icon-192.png", "type": "image/png", "sizes": "192x192" }, - { "src": "icon-512.png", "type": "image/png", "sizes": "512x512" } - ], - "theme_color": "#000000", - "background_color": "#000000", - "display": "standalone" -} \ No newline at end of file diff --git a/docs/train-frdc-lightning.html b/docs/train-frdc-lightning.html index 19ba79c2..2bdfd346 100644 --- a/docs/train-frdc-lightning.html +++ b/docs/train-frdc-lightning.html @@ -1,4 +1,4 @@ - train.frdc_datamodule & frdc_module | Documentation

Documentation 0.0.8 Help

train.frdc_datamodule & frdc_module

These are FRDC-specific LightningDataModule and LightningModule classes, core components of the PyTorch Lightning ecosystem that provide a simple interface to train and evaluate models.

Classes

FRDCDataModule

The FRDC PyTorch Lightning DataModule.

FRDCModule

The FRDC PyTorch Lightning Module.

Usage

API

FRDCDataModule(segments, labels, preprocess, augmentation, train_val_test_split, batch_size)

Initializes the FRDC PyTorch Lightning DataModule.


  • segments, labels are retrieved from the dataset (see Retrieve our Datasets).

  • preprocess is a function that takes in a segment and returns a preprocessed segment. In particular, it should accept a list of NumPy NDArrays and return a single stacked PyTorch Tensor.

  • augmentation is a function that takes in a segment and returns an augmented segment. In particular, it takes in a PyTorch Tensor and returns another.

  • train_val_test_split is a function that takes a TensorDataset and returns a list of 3 TensorDatasets, for train, val and test respectively.

  • batch_size is the batch size.

FRDCModule(model_cls, model_kwargs, optim_cls, optim_kwargs)

Initializes the FRDC PyTorch Lightning Module.


  • model_cls is the Class of the model.

  • model_kwargs is the kwargs to pass to the model.

  • optim_cls is the Class of the optimizer.

  • optim_kwargs is the kwargs to pass to the optimizer.

Internally, the module will initialize the model and optimizer as follows:

+ train.frdc_datamodule & frdc_module | Documentation

Documentation 0.0.8 Help

train.frdc_datamodule & frdc_module

These are FRDC-specific LightningDataModule and LightningModule classes, core components of the PyTorch Lightning ecosystem that provide a simple interface to train and evaluate models.

Classes

FRDCDataModule

The FRDC PyTorch Lightning DataModule.

FRDCModule

The FRDC PyTorch Lightning Module.

Usage
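The following is a hedged sketch assembled from the API signatures below; the preprocessing, augmentation, split, and model placeholders are illustrative stand-ins, not the project's canonical choices (and FRDCModule's import path is assumed from this page's title).

import numpy as np
import torch
from torch.utils.data import random_split
from frdc.train.frdc_datamodule import FRDCDataModule
from frdc.train.frdc_module import FRDCModule

def preprocess(segments: list[np.ndarray]) -> torch.Tensor:
    # Placeholder: stack equally sized segments into one float tensor.
    return torch.stack([torch.from_numpy(s).float() for s in segments])

def augmentation(x: torch.Tensor) -> torch.Tensor:
    # Placeholder: identity augmentation.
    return x

def train_val_test_split(tds):
    # Placeholder: split the TensorDataset into train/val/test.
    return random_split(tds, [0.6, 0.2, 0.2])

class MyModel(torch.nn.Module):
    # Placeholder model; substitute the project's actual model class.
    def __init__(self, n_classes: int):
        super().__init__()
        self.fc = torch.nn.LazyLinear(n_classes)

    def forward(self, x):
        return self.fc(x.flatten(1))

# segments and labels as retrieved in "Retrieve our Datasets".
dm = FRDCDataModule(
    segments=segments,
    labels=labels,
    preprocess=preprocess,
    augmentation=augmentation,
    train_val_test_split=train_val_test_split,
    batch_size=32,
)
m = FRDCModule(
    model_cls=MyModel,
    model_kwargs=dict(n_classes=10),
    optim_cls=torch.optim.Adam,
    optim_kwargs=dict(lr=1e-3),
)

A standard PyTorch Lightning Trainer then consumes these as usual, e.g. Trainer().fit(m, datamodule=dm).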

API

FRDCDataModule(segments, labels, preprocess, augmentation, train_val_test_split, batch_size)

Initializes the FRDC PyTorch Lightning DataModule.


  • segments, labels are retrieved from the dataset (see Retrieve our Datasets).

  • preprocess is a function that takes in a segment and returns a preprocessed segment. In particular, it should accept a list of NumPy NDArrays and return a single stacked PyTorch Tensor.

  • augmentation is a function that takes in a segment and returns an augmented segment. In particular, it takes in a PyTorch Tensor and returns another.

  • train_val_test_split is a function that takes a TensorDataset and returns a list of 3 TensorDatasets, for train, val and test respectively.

  • batch_size is the batch size.

FRDCModule(model_cls, model_kwargs, optim_cls, optim_kwargs)

Initializes the FRDC PyTorch Lightning Module.


  • model_cls is the Class of the model.

  • model_kwargs is the kwargs to pass to the model.

  • optim_cls is the Class of the optimizer.

  • optim_kwargs is the kwargs to pass to the optimizer.

Internally, the module will initialize the model and optimizer as follows:

 model = model_cls(**model_kwargs)
 optim = optim_cls(model.parameters(), **optim_kwargs)
Last modified: 27 December 2023
\ No newline at end of file +
Last modified: 29 December 2023
\ No newline at end of file