From 7914ccc2ea25485658c4333e6caeec4e842d1169 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 5 Aug 2024 04:27:05 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 intel_extension_for_transformers/transformers/utils/config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/intel_extension_for_transformers/transformers/utils/config.py b/intel_extension_for_transformers/transformers/utils/config.py
index 5362d172cb8..48e72039d63 100644
--- a/intel_extension_for_transformers/transformers/utils/config.py
+++ b/intel_extension_for_transformers/transformers/utils/config.py
@@ -833,7 +833,7 @@ def __init__(
         self.double_quant_group_size = double_quant_group_size
         # "transformer.output_layer" for chatglm series model.
         # "embed_out" for dolly v2 series model.
-        self.llm_int8_skip_modules = kwargs.get("llm_int8_skip_modules", 
+        self.llm_int8_skip_modules = kwargs.get("llm_int8_skip_modules",
                                                 ["lm_head", "transformer.output_layer", "embed_out"])
         self.use_ggml = use_ggml
         self.use_quant = use_quant
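
Note: the patched line only removes trailing whitespace; the surrounding logic is unchanged. For reference, the line uses the kwargs.get fallback pattern: the skip list defaults to ["lm_head", "transformer.output_layer", "embed_out"] unless the caller passes llm_int8_skip_modules explicitly. The following is a minimal standalone sketch of that pattern; _ExampleQuantConfig is a hypothetical class used only for illustration, not the library's actual config class.

    # Sketch of the kwargs.get default pattern seen in the patched line.
    class _ExampleQuantConfig:
        def __init__(self, **kwargs):
            # "transformer.output_layer" covers chatglm series models,
            # "embed_out" covers dolly v2 series models (per the original comment).
            self.llm_int8_skip_modules = kwargs.get(
                "llm_int8_skip_modules",
                ["lm_head", "transformer.output_layer", "embed_out"],
            )

    # The default applies when the keyword is omitted:
    print(_ExampleQuantConfig().llm_int8_skip_modules)
    # An explicit value overrides the default:
    print(_ExampleQuantConfig(llm_int8_skip_modules=["lm_head"]).llm_int8_skip_modules)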