[SW-216156] Fix mixtral Fused MoE issues after rebase (HabanaAI#708)
dudilester authored Jan 21, 2025
1 parent fedf706 commit 37eb4fc
Showing 1 changed file with 13 additions and 10 deletions.
vllm/model_executor/layers/fused_moe/layer.py: 13 additions & 10 deletions
@@ -160,16 +160,19 @@ def forward_cuda(
                              topk_ids=topk_ids,
                              inplace=True)
 
-    def forward_hpu(self,
-                    layer: torch.nn.Module,
-                    x: torch.Tensor,
-                    use_grouped_topk: bool,
-                    top_k: int,
-                    router_logits: torch.Tensor,
-                    renormalize: bool,
-                    topk_group: Optional[int] = None,
-                    num_expert_group: Optional[int] = None,
-                    custom_routing_function: Optional[Callable] = None):
+    def forward_hpu(
+        self,
+        layer: torch.nn.Module,
+        x: torch.Tensor,
+        use_grouped_topk: bool,
+        top_k: int,
+        router_logits: torch.Tensor,
+        renormalize: bool,
+        topk_group: Optional[int] = None,
+        num_expert_group: Optional[int] = None,
+        custom_routing_function: Optional[Callable] = None,
+        **kwargs,
+    ):
         assert not use_grouped_topk, 'use_grouped_topk must be False on HPU'
         assert num_expert_group is None, ('num_expert_group is '
                                           'not supported on HPU')
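The functional part of the fix is the new **kwargs catch-all: after the rebase, callers of forward_hpu may pass additional routing keyword arguments that the HPU path does not implement, and without **kwargs such calls would fail with a TypeError before the method's own assertions even run. Below is a minimal, self-contained sketch of that effect, under stated assumptions: the class name and the extra keyword ("scoring_func") are illustrative only and are not taken from this diff.

# A hedged sketch of the signature change; the HPU expert dispatch body is
# elided and the extra keyword in the demo call is hypothetical.

from typing import Callable, Optional

import torch


class UnquantizedFusedMoEMethodSketch:
    def forward_hpu(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        use_grouped_topk: bool,
        top_k: int,
        router_logits: torch.Tensor,
        renormalize: bool,
        topk_group: Optional[int] = None,
        num_expert_group: Optional[int] = None,
        custom_routing_function: Optional[Callable] = None,
        **kwargs,  # absorbs routing kwargs the HPU path does not use
    ):
        assert not use_grouped_topk, 'use_grouped_topk must be False on HPU'
        assert num_expert_group is None, ('num_expert_group is '
                                          'not supported on HPU')
        # ... actual HPU fused-MoE dispatch elided in this sketch ...
        return x


# Before **kwargs was added, a post-rebase caller passing an extra keyword
# would raise "TypeError: forward_hpu() got an unexpected keyword argument".
method = UnquantizedFusedMoEMethodSketch()
out = method.forward_hpu(
    layer=torch.nn.Identity(),
    x=torch.randn(4, 16),
    use_grouped_topk=False,
    top_k=2,
    router_logits=torch.randn(4, 8),
    renormalize=True,
    scoring_func="softmax",  # hypothetical extra kwarg, ignored via **kwargs
)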
