From e1beab1c3003f812a74a84e9772e85b45e22781b Mon Sep 17 00:00:00 2001 From: Evening Date: Wed, 29 May 2024 15:11:15 +0800 Subject: [PATCH 01/10] Refactor dummy dl code --- src/frdc/train/frdc_datamodule.py | 38 ++++++++++++------------------- 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/src/frdc/train/frdc_datamodule.py b/src/frdc/train/frdc_datamodule.py index 7b3120a..cc27ac7 100644 --- a/src/frdc/train/frdc_datamodule.py +++ b/src/frdc/train/frdc_datamodule.py @@ -70,49 +70,39 @@ def __post_init__(self): self.train_unl_ds.__class__ = FRDCUnlabelledDataset def train_dataloader(self): - num_samples = self.batch_size * self.train_iters + n_samples = self.batch_size * self.train_iters if self.sampling_strategy == "stratified": - sampler = lambda ds: RandomStratifiedSampler( - ds.targets, num_samples=num_samples, replacement=True + sampler_fn = lambda ds: RandomStratifiedSampler( + ds.targets, + num_samples=n_samples, ) elif self.sampling_strategy == "random": - sampler = lambda ds: RandomSampler( - ds, num_samples=num_samples, replacement=True + sampler_fn = lambda ds: RandomSampler( + ds, + num_samples=n_samples, ) else: - raise ValueError( - f"Invalid sampling strategy: {self.sampling_strategy}" - ) + raise ValueError(f"Invalid strategy: {self.sampling_strategy}") lab_dl = DataLoader( self.train_lab_ds, batch_size=self.batch_size, - sampler=sampler(self.train_lab_ds), + sampler=sampler_fn(self.train_lab_ds), ) unl_dl = ( DataLoader( self.train_unl_ds, batch_size=self.batch_size, - sampler=sampler(self.train_unl_ds), + sampler=sampler_fn(self.train_unl_ds), ) if self.train_unl_ds is not None # This is a hacky way to create an empty dataloader. - # The size should be the same as the labelled dataloader so that - # the iterator doesn't prematurely stop. - else DataLoader( - empty := [[] for _ in range(len(self.train_lab_ds))], - batch_size=self.batch_size, - sampler=RandomSampler( - empty, - num_samples=num_samples, - ), - ) + # The size should be the same or larger than the + # labelled dataloader so the iterator doesn't prematurely stop. 
+ else DataLoader([[] for _ in range(len(lab_dl))]) ) return [lab_dl, unl_dl] def val_dataloader(self): - return DataLoader( - self.val_ds, - batch_size=self.batch_size, - ) + return DataLoader(self.val_ds, batch_size=self.batch_size) From e947f431153790da537999e037dccfe01c9a7523 Mon Sep 17 00:00:00 2001 From: Evening Date: Wed, 29 May 2024 16:43:52 +0800 Subject: [PATCH 02/10] Refactor out preprocess for external use --- src/frdc/train/fixmatch_module.py | 31 +++++----- src/frdc/train/mixmatch_module.py | 21 +++---- src/frdc/train/utils.py | 95 +++++++++++++++++-------------- 3 files changed, 71 insertions(+), 76 deletions(-) diff --git a/src/frdc/train/fixmatch_module.py b/src/frdc/train/fixmatch_module.py index 5198540..40b2dec 100644 --- a/src/frdc/train/fixmatch_module.py +++ b/src/frdc/train/fixmatch_module.py @@ -68,9 +68,9 @@ def loss_unl(unl_pred: torch.Tensor, unl: torch.Tensor): return F.cross_entropy(unl_pred, unl) def training_step(self, batch, batch_idx): + (x_lbl, y_lbl), x_unls = batch opt = self.optimizers() opt.zero_grad() - (x_lbl, y_lbl), x_unls = batch self.log("train/x_lbl_mean", x_lbl.mean()) self.log("train/x_lbl_stdev", x_lbl.std()) @@ -133,7 +133,8 @@ def training_step(self, batch, batch_idx): self.log("train/acc", acc, prog_bar=True) def validation_step(self, batch, batch_idx): - x, y = batch + # The batch outputs x_unls due to our on_before_batch_transfer + (x, y), _x_unls = batch wandb.log({"val/y_lbl": wandb_hist(y, self.n_classes)}) y_pred = self(x) wandb.log( @@ -154,7 +155,8 @@ def validation_step(self, batch, batch_idx): return loss def test_step(self, batch, batch_idx): - x, y = batch + # The batch outputs x_unls due to our on_before_batch_transfer + (x, y), _x_unls = batch y_pred = self(x) loss = F.cross_entropy(y_pred, y.long()) @@ -166,7 +168,7 @@ def test_step(self, batch, batch_idx): return loss def predict_step(self, batch, *args, **kwargs) -> Any: - x, y = batch + (x, y), _x_unls = batch y_pred = self(x) y_true_str = self.y_encoder.inverse_transform( y.cpu().numpy().reshape(-1, 1) @@ -190,23 +192,16 @@ def on_before_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any: want to export the model alongside the transformations. """ - # We need to handle the train and val dataloaders differently. - # For training, the unlabelled data is returned while for validation, - # the unlabelled data is just omitted. if self.training: - (x_lab, y), x_unl = batch + (x_lbl, y_lbl), x_unl = batch else: - x_lab, y = batch - x_unl = [] + x_lbl, y_lbl = batch + x_unl = None - (x_lab_trans, y_trans), x_unl_trans = preprocess( - x_lab=x_lab, - y_lab=y, - x_unl=x_unl, + return preprocess( + x_lab=x_lbl, + y_lab=y_lbl, x_scaler=self.x_scaler, y_encoder=self.y_encoder, + x_unl=x_unl, ) - if self.training: - return (x_lab_trans, y_trans), x_unl_trans - else: - return x_lab_trans, y_trans diff --git a/src/frdc/train/mixmatch_module.py b/src/frdc/train/mixmatch_module.py index 26c21b3..c99e5ac 100644 --- a/src/frdc/train/mixmatch_module.py +++ b/src/frdc/train/mixmatch_module.py @@ -250,23 +250,16 @@ def on_before_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any: want to export the model alongside the transformations. """ - # We need to handle the train and val dataloaders differently. - # For training, the unlabelled data is returned while for validation, - # the unlabelled data is just omitted. 
if self.training: - (x_lab, y), x_unl = batch + (x_lbl, y_lbl), x_unl = batch else: - x_lab, y = batch - x_unl = [] + x_lbl, y_lbl = batch + x_unl = None - (x_lab_trans, y_trans), x_unl_trans = preprocess( - x_lab=x_lab, - y_lab=y, - x_unl=x_unl, + return preprocess( + x_lab=x_lbl, + y_lab=y_lbl, x_scaler=self.x_scaler, y_encoder=self.y_encoder, + x_unl=x_unl, ) - if self.training: - return (x_lab_trans, y_trans), x_unl_trans - else: - return x_lab_trans, y_trans diff --git a/src/frdc/train/utils.py b/src/frdc/train/utils.py index 57e3ad2..3d5bac4 100644 --- a/src/frdc/train/utils.py +++ b/src/frdc/train/utils.py @@ -52,50 +52,6 @@ def sharpen(y: torch.Tensor, temp: float) -> torch.Tensor: return y_sharp -def x_standard_scale( - x_scaler: StandardScaler, x: torch.Tensor -) -> torch.Tensor: - """Standard scales the data - - Notes: - This is a wrapper around the StandardScaler to handle PyTorch tensors. - - Args: - x_scaler: The StandardScaler to use. - x: The data to standard scale, of shape (B, C, H, W). - """ - # Standard Scaler only accepts (n_samples, n_features), - # so we need to do some fancy reshaping. - # Note that moving dimensions then reshaping is different from just - # reshaping! - - # Move Channel to the last dimension then transform - # B x C x H x W -> B x H x W x C - b, c, h, w = x.shape - x_ss = x_scaler.transform(x.permute(0, 2, 3, 1).reshape(-1, c)) - - # Move Channel back to the second dimension - # B x H x W x C -> B x C x H x W - return torch.nan_to_num( - torch.from_numpy(x_ss.reshape(b, h, w, c)).permute(0, 3, 1, 2).float() - ) - - -def y_encode(y_encoder: OrdinalEncoder, y: torch.Tensor) -> torch.Tensor: - """Encodes the labels - - Notes: - This is a wrapper around the OrdinalEncoder to handle PyTorch tensors. - - Args: - y_encoder: The OrdinalEncoder to use. - y: The labels to encode. - """ - return torch.from_numpy( - y_encoder.transform(np.array(y).reshape(-1, 1)).squeeze() - ) - - def preprocess( x_lab: torch.Tensor, y_lab: torch.Tensor, @@ -105,6 +61,13 @@ def preprocess( ) -> tuple[tuple[torch.Tensor, torch.Tensor], list[torch.Tensor]]: """Preprocesses the data + Notes: + The reason why x and y's preprocessing is coupled is due to the NaN + elimination step. The NaN elimination step is due to unseen labels by y + + fn_recursive is to recursively apply some function to a nested list. + This happens due to unlabelled being a list of tensors. + Args: x_lab: The data to preprocess. y_lab: The labels to preprocess. @@ -143,6 +106,50 @@ def preprocess( return (x_lab_trans, y_trans.long()), x_unl_trans +def x_standard_scale( + x_scaler: StandardScaler, x: torch.Tensor +) -> torch.Tensor: + """Standard scales the data + + Notes: + This is a wrapper around the StandardScaler to handle PyTorch tensors. + + Args: + x_scaler: The StandardScaler to use. + x: The data to standard scale, of shape (B, C, H, W). + """ + # Standard Scaler only accepts (n_samples, n_features), + # so we need to do some fancy reshaping. + # Note that moving dimensions then reshaping is different from just + # reshaping! 
+ + # Move Channel to the last dimension then transform + # B x C x H x W -> B x H x W x C + b, c, h, w = x.shape + x_ss = x_scaler.transform(x.permute(0, 2, 3, 1).reshape(-1, c)) + + # Move Channel back to the second dimension + # B x H x W x C -> B x C x H x W + return torch.nan_to_num( + torch.from_numpy(x_ss.reshape(b, h, w, c)).permute(0, 3, 1, 2).float() + ) + + +def y_encode(y_encoder: OrdinalEncoder, y: torch.Tensor) -> torch.Tensor: + """Encodes the labels + + Notes: + This is a wrapper around the OrdinalEncoder to handle PyTorch tensors. + + Args: + y_encoder: The OrdinalEncoder to use. + y: The labels to encode. + """ + return torch.from_numpy( + y_encoder.transform(np.array(y).reshape(-1, 1)).squeeze() + ) + + def wandb_hist(x: torch.Tensor, num_bins: int) -> wandb.Histogram: """Records a W&B Histogram""" return wandb.Histogram( From c8d9e8cd1a8c9905bd51b00af057b9a6cf0edaf2 Mon Sep 17 00:00:00 2001 From: Evening Date: Wed, 29 May 2024 16:44:15 +0800 Subject: [PATCH 03/10] Comment on difficult to understand casting --- src/frdc/train/frdc_datamodule.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/frdc/train/frdc_datamodule.py b/src/frdc/train/frdc_datamodule.py index cc27ac7..83ff905 100644 --- a/src/frdc/train/frdc_datamodule.py +++ b/src/frdc/train/frdc_datamodule.py @@ -66,6 +66,8 @@ class FRDCDataModule(LightningDataModule): def __post_init__(self): super().__init__() + # This provides a failsafe interface if somehow someone used the + # labelled dataset as the unlabelled dataset. if isinstance(self.train_unl_ds, FRDCDataset): self.train_unl_ds.__class__ = FRDCUnlabelledDataset From 6dc8a00fbd4f859c9082c8122961dc4e3c63a679 Mon Sep 17 00:00:00 2001 From: Evening Date: Wed, 29 May 2024 16:49:03 +0800 Subject: [PATCH 04/10] Change _lab to _lbl for syntax consistency --- src/frdc/train/fixmatch_module.py | 4 ++-- src/frdc/train/mixmatch_module.py | 4 ++-- src/frdc/train/utils.py | 18 +++++++++--------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/frdc/train/fixmatch_module.py b/src/frdc/train/fixmatch_module.py index 40b2dec..1e0602b 100644 --- a/src/frdc/train/fixmatch_module.py +++ b/src/frdc/train/fixmatch_module.py @@ -199,8 +199,8 @@ def on_before_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any: x_unl = None return preprocess( - x_lab=x_lbl, - y_lab=y_lbl, + x_lbl=x_lbl, + y_lbl=y_lbl, x_scaler=self.x_scaler, y_encoder=self.y_encoder, x_unl=x_unl, diff --git a/src/frdc/train/mixmatch_module.py b/src/frdc/train/mixmatch_module.py index c99e5ac..e9ec317 100644 --- a/src/frdc/train/mixmatch_module.py +++ b/src/frdc/train/mixmatch_module.py @@ -257,8 +257,8 @@ def on_before_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any: x_unl = None return preprocess( - x_lab=x_lbl, - y_lab=y_lbl, + x_lbl=x_lbl, + y_lbl=y_lbl, x_scaler=self.x_scaler, y_encoder=self.y_encoder, x_unl=x_unl, diff --git a/src/frdc/train/utils.py b/src/frdc/train/utils.py index 3d5bac4..03e653e 100644 --- a/src/frdc/train/utils.py +++ b/src/frdc/train/utils.py @@ -53,8 +53,8 @@ def sharpen(y: torch.Tensor, temp: float) -> torch.Tensor: def preprocess( - x_lab: torch.Tensor, - y_lab: torch.Tensor, + x_lbl: torch.Tensor, + y_lbl: torch.Tensor, x_scaler: StandardScaler, y_encoder: OrdinalEncoder, x_unl: list[torch.Tensor] = None, @@ -69,8 +69,8 @@ def preprocess( This happens due to unlabelled being a list of tensors. Args: - x_lab: The data to preprocess. - y_lab: The labels to preprocess. + x_lbl: The data to preprocess. + y_lbl: The labels to preprocess. 
x_scaler: The StandardScaler to use. y_encoder: The OrdinalEncoder to use. @@ -80,8 +80,8 @@ def preprocess( x_unl = [] if x_unl is None else x_unl - x_lab_trans = x_standard_scale(x_scaler, x_lab) - y_trans = y_encode(y_encoder, y_lab) + x_lbl_trans = x_standard_scale(x_scaler, x_lbl) + y_trans = y_encode(y_encoder, y_lbl) x_unl_trans = fn_recursive( x_unl, fn=lambda x: x_standard_scale(x_scaler, x), @@ -93,8 +93,8 @@ def preprocess( # Ordinal Encoders can return a np.nan if the value is not in the # categories. We will remove that from the batch. nan = ~torch.isnan(y_trans) - x_lab_trans = x_lab_trans[nan] - x_lab_trans = torch.nan_to_num(x_lab_trans) + x_lbl_trans = x_lbl_trans[nan] + x_lbl_trans = torch.nan_to_num(x_lbl_trans) x_unl_trans = fn_recursive( x_unl_trans, fn=lambda x: torch.nan_to_num(x[nan]), @@ -103,7 +103,7 @@ def preprocess( ) y_trans = y_trans[nan] - return (x_lab_trans, y_trans.long()), x_unl_trans + return (x_lbl_trans, y_trans.long()), x_unl_trans def x_standard_scale( From 5bd9fb29699eb74a567d0fa7e606ffb1fe3ed16b Mon Sep 17 00:00:00 2001 From: Evening Date: Wed, 29 May 2024 17:49:06 +0800 Subject: [PATCH 05/10] Fix issue preprocessing breaking w/ 1 sample --- src/frdc/train/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frdc/train/utils.py b/src/frdc/train/utils.py index 03e653e..ee6c88a 100644 --- a/src/frdc/train/utils.py +++ b/src/frdc/train/utils.py @@ -146,7 +146,7 @@ def y_encode(y_encoder: OrdinalEncoder, y: torch.Tensor) -> torch.Tensor: y: The labels to encode. """ return torch.from_numpy( - y_encoder.transform(np.array(y).reshape(-1, 1)).squeeze() + y_encoder.transform(np.array(y).reshape(-1, 1))[..., 0] ) From a55128fd9a39eb892084a87076c3abe90f2d8797 Mon Sep 17 00:00:00 2001 From: Evening Date: Thu, 30 May 2024 15:34:54 +0800 Subject: [PATCH 06/10] Fix issue with bad batch spread syntax --- src/frdc/train/mixmatch_module.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/frdc/train/mixmatch_module.py b/src/frdc/train/mixmatch_module.py index e9ec317..fb888b5 100644 --- a/src/frdc/train/mixmatch_module.py +++ b/src/frdc/train/mixmatch_module.py @@ -194,7 +194,7 @@ def on_after_backward(self) -> None: self.update_ema() def validation_step(self, batch, batch_idx): - x, y = batch + (x, y), _x_unls = batch wandb.log({"val/y_lbl": wandb_hist(y, self.n_classes)}) y_pred = self.ema_model(x) wandb.log( @@ -214,7 +214,7 @@ def validation_step(self, batch, batch_idx): return loss def test_step(self, batch, batch_idx): - x, y = batch + (x, y), _x_unls = batch y_pred = self.ema_model(x) loss = F.cross_entropy(y_pred, y.long()) @@ -226,7 +226,7 @@ def test_step(self, batch, batch_idx): return loss def predict_step(self, batch, *args, **kwargs) -> Any: - x, y = batch + (x, y), _x_unls = batch y_pred = self.ema_model(x) y_true_str = self.y_encoder.inverse_transform( y.cpu().numpy().reshape(-1, 1) From e5d2c954a0ae8dd35f4bb6765a55186efeb6c215 Mon Sep 17 00:00:00 2001 From: Evening Date: Fri, 31 May 2024 09:33:31 +0800 Subject: [PATCH 07/10] Reduce epochs This is because both models tend to converge (in acc) at this point already --- tests/model_tests/chestnut_dec_may/train_fixmatch.py | 2 +- tests/model_tests/chestnut_dec_may/train_mixmatch.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/model_tests/chestnut_dec_may/train_fixmatch.py b/tests/model_tests/chestnut_dec_may/train_fixmatch.py index f17c791..9e665ce 100644 --- a/tests/model_tests/chestnut_dec_may/train_fixmatch.py +++ 
b/tests/model_tests/chestnut_dec_may/train_fixmatch.py @@ -136,7 +136,7 @@ def main( if __name__ == "__main__": BATCH_SIZE = 32 - EPOCHS = 50 + EPOCHS = 10 TRAIN_ITERS = 25 LR = 3e-3 diff --git a/tests/model_tests/chestnut_dec_may/train_mixmatch.py b/tests/model_tests/chestnut_dec_may/train_mixmatch.py index d8a5fc4..c9da37c 100644 --- a/tests/model_tests/chestnut_dec_may/train_mixmatch.py +++ b/tests/model_tests/chestnut_dec_may/train_mixmatch.py @@ -127,7 +127,7 @@ def main( if __name__ == "__main__": BATCH_SIZE = 32 - EPOCHS = 50 + EPOCHS = 15 TRAIN_ITERS = 25 LR = 1e-3 From db540bb54c3c920b6309121945443d5f0e038e67 Mon Sep 17 00:00:00 2001 From: Evening Date: Fri, 31 May 2024 09:33:52 +0800 Subject: [PATCH 08/10] Make reports a+ to "stack" --- tests/model_tests/chestnut_dec_may/train_fixmatch.py | 4 ++-- tests/model_tests/chestnut_dec_may/train_mixmatch.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/model_tests/chestnut_dec_may/train_fixmatch.py b/tests/model_tests/chestnut_dec_may/train_fixmatch.py index 9e665ce..0bd2346 100644 --- a/tests/model_tests/chestnut_dec_may/train_fixmatch.py +++ b/tests/model_tests/chestnut_dec_may/train_fixmatch.py @@ -109,9 +109,9 @@ def main( trainer.fit(m, datamodule=dm) - with open(Path(__file__).parent / "report.md", "w") as f: + with open(Path(__file__).parent / "report.md", "a+") as f: f.write( - f"# Chestnut Nature Park (Dec 2020 vs May 2021)\n" + f"# Chestnut Nature Park (Dec 2020 vs May 2021) FixMatch\n" f"- Results: [WandB Report]({wandb.run.get_url()})" ) diff --git a/tests/model_tests/chestnut_dec_may/train_mixmatch.py b/tests/model_tests/chestnut_dec_may/train_mixmatch.py index c9da37c..9f82c25 100644 --- a/tests/model_tests/chestnut_dec_may/train_mixmatch.py +++ b/tests/model_tests/chestnut_dec_may/train_mixmatch.py @@ -100,9 +100,9 @@ def main( trainer.fit(m, datamodule=dm) - with open(Path(__file__).parent / "report.md", "w") as f: + with open(Path(__file__).parent / "report.md", "a+") as f: f.write( - f"# Chestnut Nature Park (Dec 2020 vs May 2021)\n" + f"# Chestnut Nature Park (Dec 2020 vs May 2021) MixMatch\n" f"- Results: [WandB Report]({wandb.run.get_url()})" ) From ceaebb4ca4f7a9c7cbbfb5d42425b62519921dff Mon Sep 17 00:00:00 2001 From: Evening Date: Fri, 31 May 2024 09:34:14 +0800 Subject: [PATCH 09/10] Make GH Actions run both models --- .github/workflows/model-tests.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/model-tests.yml b/.github/workflows/model-tests.yml index ca734ad..a04f378 100644 --- a/.github/workflows/model-tests.yml +++ b/.github/workflows/model-tests.yml @@ -85,7 +85,8 @@ jobs: working-directory: ${{ github.workspace }}/tests run: | git config --global --add safe.directory /__w/FRDC-ML/FRDC-ML - python3 -m model_tests.chestnut_dec_may.train + python3 -m model_tests.chestnut_dec_may.train_mixmatch + python3 -m model_tests.chestnut_dec_may.train_fixmatch - name: Comment results via CML run: | From 226b0d9a9c827e27ebc48c8ea54877633cbbd7f2 Mon Sep 17 00:00:00 2001 From: Evening Date: Fri, 31 May 2024 09:36:14 +0800 Subject: [PATCH 10/10] Format Black --- src/frdc/load/label_studio.py | 1 - src/frdc/train/mixmatch_module.py | 3 --- src/frdc/utils/training.py | 1 + tests/model_tests/chestnut_dec_may/train_mixmatch.py | 1 - 4 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/frdc/load/label_studio.py b/src/frdc/load/label_studio.py index 0b587f1..5486e92 100644 --- a/src/frdc/load/label_studio.py +++ b/src/frdc/load/label_studio.py @@ 
-8,7 +8,6 @@ from frdc.conf import LABEL_STUDIO_CLIENT - logger = logging.getLogger(__name__) diff --git a/src/frdc/train/mixmatch_module.py b/src/frdc/train/mixmatch_module.py index fb888b5..300d25e 100644 --- a/src/frdc/train/mixmatch_module.py +++ b/src/frdc/train/mixmatch_module.py @@ -3,7 +3,6 @@ from abc import abstractmethod from typing import Any -import numpy as np import torch import torch.nn.functional as F import wandb @@ -16,8 +15,6 @@ mix_up, sharpen, wandb_hist, - x_standard_scale, - y_encode, preprocess, ) diff --git a/src/frdc/utils/training.py b/src/frdc/utils/training.py index e69ae20..c5cfa14 100644 --- a/src/frdc/utils/training.py +++ b/src/frdc/utils/training.py @@ -1,4 +1,5 @@ from __future__ import annotations + from pathlib import Path import lightning as pl diff --git a/tests/model_tests/chestnut_dec_may/train_mixmatch.py b/tests/model_tests/chestnut_dec_may/train_mixmatch.py index 9f82c25..d70ff06 100644 --- a/tests/model_tests/chestnut_dec_may/train_mixmatch.py +++ b/tests/model_tests/chestnut_dec_may/train_mixmatch.py @@ -15,7 +15,6 @@ EarlyStopping, ) from lightning.pytorch.loggers import WandbLogger -from sklearn.preprocessing import StandardScaler, OrdinalEncoder from frdc.load.preset import FRDCDatasetPreset as ds from frdc.models.efficientnetb1 import EfficientNetB1MixMatchModule
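
A note on the reshaping comment in x_standard_scale() (patch 02): permuting the channel axis to the back before reshaping is what turns every pixel into a C-feature row for the StandardScaler; reshaping alone would instead group consecutive values of a single channel. A minimal sketch of the difference, not taken from the repository:

    import torch

    b, c, h, w = 2, 3, 4, 4
    x = torch.arange(b * c * h * w, dtype=torch.float32).reshape(b, c, h, w)

    # Rows are per-pixel channel vectors, which is what StandardScaler expects.
    good = x.permute(0, 2, 3, 1).reshape(-1, c)
    # Rows are consecutive values from a single channel, the wrong feature layout.
    bad = x.reshape(-1, c)

    print(good[0])  # tensor([ 0., 16., 32.])  one value per channel
    print(bad[0])   # tensor([0., 1., 2.])     three values from channel 0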
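
A note on the preprocess() docstring added in patch 02: the coupling of x and y comes from the NaN elimination step, since labels unseen by the OrdinalEncoder come back as NaN and the mask built from y must also filter x. A short sketch with made-up labels, assuming the encoder is constructed with handle_unknown="use_encoded_value" and unknown_value=np.nan, which is the configuration that makes transform() return NaN instead of raising:

    import numpy as np
    from sklearn.preprocessing import OrdinalEncoder

    enc = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan)
    enc.fit(np.array([["oak"], ["pine"]]))

    y = enc.transform(np.array([["oak"], ["birch"]]))[..., 0]  # [0., nan]
    keep = ~np.isnan(y)  # [True, False]
    # The same mask must also filter the image batch, e.g. x = x[keep],
    # which is why preprocess() transforms x and y together rather than separately.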
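
A note on patch 05 ("Fix issue preprocessing breaking w/ 1 sample"): OrdinalEncoder.transform() returns an (N, 1) array, and with N == 1 a .squeeze() collapses it to a 0-d scalar, leaving the later NaN masking with no batch axis to index; [..., 0] keeps the shape (N,) for any N. A minimal sketch, not taken from the repository:

    import numpy as np
    from sklearn.preprocessing import OrdinalEncoder

    enc = OrdinalEncoder()
    enc.fit(np.array([["oak"], ["pine"]]))

    y = enc.transform(np.array([["oak"]]))  # shape (1, 1)
    print(y.squeeze().shape)  # ()   batch dimension collapsed away
    print(y[..., 0].shape)    # (1,) batch dimension preserved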