
Commit

Expose ImageNet Scaling option
Eve-ning committed Feb 21, 2024
1 parent 4a179c0 commit 2765a59
Showing 1 changed file with 12 additions and 3 deletions.
15 changes: 12 additions & 3 deletions src/frdc/models/inceptionv3.py
@@ -25,11 +25,18 @@ def __init__(
         x_scaler: StandardScaler,
         y_encoder: OrdinalEncoder,
         ema_lr: float = 0.001,
+        imagenet_scaling: bool = False,
     ):
         """Initialize the InceptionV3 model.
 
         Args:
-            n_classes: The number of output classes
+            in_channels: The number of input channels.
+            n_classes: The number of classes.
+            lr: The learning rate.
+            x_scaler: The X input StandardScaler.
+            y_encoder: The Y input OrdinalEncoder.
+            ema_lr: The learning rate for the EMA model.
+            imagenet_scaling: Whether to use the adapted ImageNet scaling.
 
         Notes:
             - Min input size: 299 x 299.
@@ -129,7 +136,7 @@ def adapt_inception_multi_channel(
         return inception
 
     @staticmethod
-    def transform_input(x: torch.Tensor) -> torch.Tensor:
+    def imagenet_scaling(x: torch.Tensor) -> torch.Tensor:
         """Perform adapted ImageNet normalization on the input tensor.
 
         See Also:
@@ -181,7 +188,9 @@ def forward(self, x: torch.Tensor):
                 f"Got: {x.shape[2]} x {x.shape[3]}."
             )
 
-        x = self.transform_input(x)
+        if self.imagenet_scaling:
+            x = self.imagenet_scaling(x)
+
         # During training, the auxiliary outputs are used for auxiliary loss,
         # but during testing, only the main output is used.
         if self.training:
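
Note: the body of the renamed imagenet_scaling static method is collapsed in this diff. As a rough sketch only, torchvision's stock Inception3 remaps inputs it assumes lie in [-1, 1] onto the ImageNet per-channel statistics; an "adapted" multi-channel variant could apply a single averaged mean/std pair to every channel. The constants and the averaging below are assumptions for illustration, not the repository's actual implementation.

    import torch

    # Assumption: average the ImageNet RGB statistics so the same rescaling can be
    # applied to an arbitrary number of input channels (not the repository's code).
    _IMAGENET_MEAN = sum([0.485, 0.456, 0.406]) / 3
    _IMAGENET_STD = sum([0.229, 0.224, 0.225]) / 3


    def imagenet_scaling_sketch(x: torch.Tensor) -> torch.Tensor:
        """Rescale a (B, C, H, W) tensor from roughly [-1, 1] to ImageNet-like statistics."""
        return x * (_IMAGENET_STD / 0.5) + (_IMAGENET_MEAN - 0.5) / 0.5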

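Downstream, the exposed option is just another constructor argument. The snippet below is a hypothetical usage sketch: the class name InceptionV3, the import path, the use of scikit-learn's StandardScaler/OrdinalEncoder, and the example argument values are inferred from the file path and docstring rather than shown verbatim in this commit.

    from sklearn.preprocessing import OrdinalEncoder, StandardScaler

    from frdc.models.inceptionv3 import InceptionV3  # class name assumed from the docstring

    model = InceptionV3(
        in_channels=8,            # illustrative multi-channel input
        n_classes=10,             # illustrative
        lr=1e-3,
        x_scaler=StandardScaler(),
        y_encoder=OrdinalEncoder(),
        ema_lr=0.001,
        imagenet_scaling=True,    # the flag exposed by this commit; defaults to False
    )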