From 6b87778163bba5e17ce72447872325e25336e953 Mon Sep 17 00:00:00 2001
From: Laurent
Date: Wed, 6 Nov 2024 17:03:18 +0000
Subject: [PATCH] fix typos

---
 src/refiners/foundationals/segment_anything/model.py | 2 +-
 src/refiners/foundationals/swin/swin_transformer.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/refiners/foundationals/segment_anything/model.py b/src/refiners/foundationals/segment_anything/model.py
index ea4b44a2c..286d291f0 100644
--- a/src/refiners/foundationals/segment_anything/model.py
+++ b/src/refiners/foundationals/segment_anything/model.py
@@ -80,7 +80,7 @@ def mask_decoder(self) -> MaskDecoder:
 
     @no_grad()
     def compute_image_embedding(self, image: Image.Image) -> ImageEmbedding:
-        """Compute the emmbedding of an image.
+        """Compute the embedding of an image.
 
         Args:
             image: The image to compute the embedding of.
diff --git a/src/refiners/foundationals/swin/swin_transformer.py b/src/refiners/foundationals/swin/swin_transformer.py
index 488819e93..b1aedc8f8 100644
--- a/src/refiners/foundationals/swin/swin_transformer.py
+++ b/src/refiners/foundationals/swin/swin_transformer.py
@@ -205,7 +205,7 @@ def forward(self, x: Tensor):
 
 class WindowAttention(fl.Chain):
     """
-    Window-based Multi-head Self-Attenion (W-MSA), optionally shifted (SW-MSA).
+    Window-based Multi-head Self-Attention (W-MSA), optionally shifted (SW-MSA).
 
     It has a trainable relative position bias (RelativePositionBias).
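
Usage note (not part of the patch): a minimal sketch of calling the method whose docstring is corrected above. Only the signature of compute_image_embedding is taken from the diff; the SegmentAnythingH class, its import path, and the weight-loading step are assumptions about the surrounding refiners API.

    from PIL import Image

    # Assumption: refiners exposes a SegmentAnythingH model at this path;
    # not confirmed by this patch.
    from refiners.foundationals.segment_anything import SegmentAnythingH

    sam = SegmentAnythingH()  # hypothetical setup; weight loading omitted

    image = Image.open("example.jpg").convert("RGB")

    # compute_image_embedding is decorated with @no_grad() in model.py,
    # so this call builds no autograd graph.
    embedding = sam.compute_image_embedding(image)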