From cf7ad674afec594d7d423e5b5d8354aa06084e4d Mon Sep 17 00:00:00 2001
From: Ella Charlaix
Date: Thu, 14 Dec 2023 19:40:01 +0100
Subject: [PATCH] fix inc quantized model loading

---
 optimum/intel/neural_compressor/modeling_base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/optimum/intel/neural_compressor/modeling_base.py b/optimum/intel/neural_compressor/modeling_base.py
index e6ae0f2595..5cd224146a 100644
--- a/optimum/intel/neural_compressor/modeling_base.py
+++ b/optimum/intel/neural_compressor/modeling_base.py
@@ -164,7 +164,7 @@ def _from_pretrained(
         if q_config is None:
             model = model_class.from_pretrained(model_save_dir)
         else:
-            init_contexts = [no_init_weights(_enable=True)]
+            init_contexts = [no_init_weights(_enable=False)]
             with ContextManagers(init_contexts):
                 model = model_class(config)
             try: