From ba910ed9d3fb611fb4d7627e06f854aa5d60396f Mon Sep 17 00:00:00 2001
From: jiqing-feng
Date: Thu, 12 Dec 2024 14:15:47 +0000
Subject: [PATCH] fix when attention_mask=None

Signed-off-by: jiqing-feng
---
 optimum/intel/ipex/modeling_base.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py
index 8611bddd21..d8f830e519 100644
--- a/optimum/intel/ipex/modeling_base.py
+++ b/optimum/intel/ipex/modeling_base.py
@@ -276,6 +276,8 @@ def forward(
         attention_mask: Optional[torch.FloatTensor] = None,
         **kwargs,
     ) -> CausalLMOutputWithPast:
+        if self.add_patch and input_ids is not None and attention_mask is None:
+            attention_mask = torch.ones_like(input_ids)
         return self.model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

     def _prepare_generation_config(
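
For readers of this patch, a minimal standalone sketch of what the added guard does: when forward() is called without an attention mask, every position in input_ids is treated as attended (an all-ones mask, i.e. no padding). The helper name and the sample token ids below are illustrative only, not part of optimum-intel.

import torch

def default_attention_mask(input_ids, attention_mask=None):
    # Mirror the patched forward(): fall back to an all-ones mask
    # when the caller did not supply one, so every token is attended.
    if input_ids is not None and attention_mask is None:
        attention_mask = torch.ones_like(input_ids)
    return attention_mask

# Illustrative usage with a small batch of token ids:
input_ids = torch.tensor([[15496, 11, 995]])
print(default_attention_mask(input_ids))  # tensor([[1, 1, 1]])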