Fix imports from deprecated utils
Eve-ning committed Oct 20, 2023
1 parent c441099 commit d266921
Showing 1 changed file with 7 additions and 9 deletions.
pipeline/model_tests/chestnut_dec_may/preprocess.py (16 changes: 7 additions & 9 deletions)
@@ -4,9 +4,8 @@
 from torchvision.transforms.v2 import Resize
 
 from frdc.models import FaceNet
-from frdc.preprocess import scale_0_1_per_band
 from frdc.preprocess.glcm_padded import append_glcm_padded_cached
-from frdc.preprocess.scale import scale_normal_per_band
+from frdc.preprocess.scale import scale_normal_per_band, scale_0_1_per_band
 
 
 # TODO: Eventually, we will have multiple tests, and we should try to make
@@ -22,13 +21,12 @@ def channel_preprocess(ar: np.ndarray) -> np.ndarray:
 def segment_preprocess(ar: np.ndarray) -> torch.Tensor:
     # Preprocesses a segment array of shape: (H, W, C)
 
-    # We divide by 1.001 is make the range [0, 1) instead of [0, 1] so that
-    # glcm_padded can work properly.
-    ar = scale_0_1_per_band(ar) / 1.001
-    # We scale 0 1 before GLCM so that binning works
-    ar = append_glcm_padded_cached(ar,
-                                   step_size=7, bin_from=1, bin_to=128,
-                                   radius=3, features=(Features.MEAN,))
+    # Add a small epsilon to avoid upper bound of 1.0
+    ar = scale_0_1_per_band(ar, epsilon=0.001)
+    ar = append_glcm_padded_cached(
+        ar, step_size=7, bin_from=1, bin_to=128, radius=3,
+        features=(Features.MEAN,)
+    )
     # We can then scale normal for better neural network convergence
     ar = scale_normal_per_band(ar)
 
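For context on the change itself: the old code scaled each band to [0, 1] and then divided by 1.001 so that no value sat exactly at the upper bound, which `append_glcm_padded_cached` needs for its binning to stay in range. The new `epsilon` keyword moves that adjustment into the scaler. Below is a minimal sketch of what an epsilon-aware per-band scaler could look like, assuming channel-last (H, W, C) data and min-max scaling; the real implementation lives in `frdc.preprocess.scale` and may differ.

```python
import numpy as np


def scale_0_1_per_band_sketch(ar: np.ndarray, epsilon: float = 0.001) -> np.ndarray:
    """Hypothetical sketch of an epsilon-aware per-band scaler (not the frdc code).

    Each band of the (H, W, C) array is min-max scaled independently; the
    epsilon widens the denominator so the per-band maximum lands just below
    1.0, keeping every value strictly inside [0, 1) for GLCM binning.
    """
    band_min = ar.min(axis=(0, 1), keepdims=True)  # shape (1, 1, C)
    band_max = ar.max(axis=(0, 1), keepdims=True)  # shape (1, 1, C)
    return (ar - band_min) / (band_max - band_min + epsilon)
```

However `epsilon` is actually applied inside frdc, the effect the commit relies on is the same: values stay strictly below 1.0 before the GLCM step, without the magic `/ 1.001` divisor.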
