diff --git a/src/refiners/foundationals/segment_anything/model.py b/src/refiners/foundationals/segment_anything/model.py
index ea4b44a2c..286d291f0 100644
--- a/src/refiners/foundationals/segment_anything/model.py
+++ b/src/refiners/foundationals/segment_anything/model.py
@@ -80,7 +80,7 @@ def mask_decoder(self) -> MaskDecoder:
 
     @no_grad()
     def compute_image_embedding(self, image: Image.Image) -> ImageEmbedding:
-        """Compute the emmbedding of an image.
+        """Compute the embedding of an image.
 
         Args:
             image: The image to compute the embedding of.
diff --git a/src/refiners/foundationals/swin/swin_transformer.py b/src/refiners/foundationals/swin/swin_transformer.py
index 488819e93..b1aedc8f8 100644
--- a/src/refiners/foundationals/swin/swin_transformer.py
+++ b/src/refiners/foundationals/swin/swin_transformer.py
@@ -205,7 +205,7 @@ def forward(self, x: Tensor):
 
 
 class WindowAttention(fl.Chain):
     """
-    Window-based Multi-head Self-Attenion (W-MSA), optionally shifted (SW-MSA).
+    Window-based Multi-head Self-Attention (W-MSA), optionally shifted (SW-MSA).
 
     It has a trainable relative position bias (RelativePositionBias).