diff --git a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
index 263e4784d92..d11a72e4d43 100644
--- a/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
+++ b/intel_extension_for_transformers/transformers/modeling/modeling_auto.py
@@ -840,6 +840,14 @@ def forward(self, input: torch.Tensor) -> tuple[torch.Tensor, None]:
                 or device_map == torch.device("cpu")
             ) and model.config.model_type == "chatglm":
                 model = model.float()
+            if (
+                not torch.cuda.is_available()
+                or device_map == "cpu"
+                or device_map == torch.device("cpu")
+            ) and model.config.model_type == "mpt":
+                config = AutoConfig.from_pretrained("mosaicml_mpt-7b_config.json",
+                                                    torchscript=True)
+                model.config = config
             model.eval()

             model_type = model.config.model_type.replace("_", "-")
@@ -1077,6 +1085,7 @@ def calib_func(model):
                     recipes=quantization_config.recipes,
                     example_inputs=example_inputs,
                 )
+
                 model = quantization.fit(
                     model,
                     conf,
diff --git a/intel_extension_for_transformers/transformers/modeling/mosaicml_mpt-7b_config.json b/intel_extension_for_transformers/transformers/modeling/mosaicml_mpt-7b_config.json
new file mode 100644
index 00000000000..9a9cc31be91
--- /dev/null
+++ b/intel_extension_for_transformers/transformers/modeling/mosaicml_mpt-7b_config.json
@@ -0,0 +1,48 @@
+{
+  "architectures": [
+    "MptForCausalLM"
+  ],
+  "attn_config": {
+    "alibi": true,
+    "alibi_bias_max": 8,
+    "attn_impl": "torch",
+    "attn_pdrop": 0,
+    "attn_type": "multihead_attention",
+    "attn_uses_sequence_id": false,
+    "clip_qkv": null,
+    "prefix_lm": false,
+    "qk_ln": false,
+    "softmax_scale": null
+  },
+  "d_model": 4096,
+  "emb_pdrop": 0,
+  "embedding_fraction": 1.0,
+  "expansion_ratio": 4,
+  "init_config": {
+    "emb_init_std": null,
+    "emb_init_uniform_lim": null,
+    "fan_mode": "fan_in",
+    "init_div_is_residual": true,
+    "init_gain": 0,
+    "init_nonlinearity": "relu",
+    "init_std": 0.02,
+    "name": "kaiming_normal_",
+    "verbose": 0
+  },
+  "init_device": "cpu",
+  "learned_pos_emb": true,
+  "logit_scale": null,
+  "max_seq_len": 2048,
+  "model_type": "mpt",
+  "n_heads": 32,
+  "n_layers": 32,
+  "no_bias": true,
+  "norm_type": "low_precision_layernorm",
+  "resid_pdrop": 0,
+  "tokenizer_name": "EleutherAI/gpt-neox-20b",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.28.1",
+  "use_cache": false,
+  "verbose": 0,
+  "vocab_size": 50432
+}
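
For context, the new branch forces a TorchScript-friendly config onto MPT models when CUDA is unavailable or device_map targets the CPU. Below is a minimal standalone sketch of the same config swap. It assumes a transformers release that ships native MPT support (so AutoConfig can resolve model_type "mpt") and, like the patch itself, that the relative JSON path resolves from the current working directory; the assert lines are illustrative additions, not part of the patch.

    # Illustrative sketch (not part of the patch): reproduce the config swap
    # that the new branch performs when torch.cuda.is_available() is False, or
    # device_map targets the CPU, and model.config.model_type == "mpt".
    import torch
    from transformers import AutoConfig

    if not torch.cuda.is_available():
        # Same call the patch makes: transformers accepts a path to a config
        # JSON file, and extra kwargs such as torchscript=True are applied on
        # top of the values loaded from the file.
        config = AutoConfig.from_pretrained("mosaicml_mpt-7b_config.json",
                                            torchscript=True)
        assert config.torchscript is True
        assert config.model_type == "mpt"

Note that the shipped JSON also sets "use_cache": false; together with torchscript=True this keeps the traced forward signature simple, which is generally what jit tracing for quantization expects.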