diff --git a/.github/workflows/test_inc.yml b/.github/workflows/test_inc.yml
index fd5fd16509..3a15214f99 100644
--- a/.github/workflows/test_inc.yml
+++ b/.github/workflows/test_inc.yml
@@ -30,7 +30,8 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install .[neural-compressor,ipex,diffusers,tests]
+          pip install .[neural-compressor,diffusers,tests]
+          pip install intel-extension-for-pytorch
       - name: Test with Pytest
         run: |
           pytest tests/neural_compressor/
diff --git a/optimum/intel/neural_compressor/trainer.py b/optimum/intel/neural_compressor/trainer.py
index be4437a2be..cbd08bb5f2 100644
--- a/optimum/intel/neural_compressor/trainer.py
+++ b/optimum/intel/neural_compressor/trainer.py
@@ -41,8 +41,7 @@
 from transformers.file_utils import WEIGHTS_NAME
 
 # Integrations must be imported before ML frameworks:
-from transformers.integrations import hp_params
-from transformers.integrations.deepspeed import deepspeed_init, deepspeed_load_checkpoint
+from transformers.integrations import deepspeed_init, deepspeed_load_checkpoint, hp_params
 from transformers.modeling_utils import PreTrainedModel, get_parameter_dtype, unwrap_model
 from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
 from transformers.pytorch_utils import is_torch_less_than_1_11
diff --git a/optimum/intel/openvino/trainer.py b/optimum/intel/openvino/trainer.py
index 065931c4e0..3e64a34b09 100644
--- a/optimum/intel/openvino/trainer.py
+++ b/optimum/intel/openvino/trainer.py
@@ -54,8 +54,7 @@
 from transformers import Trainer
 from transformers.data.data_collator import DataCollator
 from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
-from transformers.integrations import hp_params
-from transformers.integrations.deepspeed import deepspeed_init, deepspeed_load_checkpoint
+from transformers.integrations import deepspeed_init, deepspeed_load_checkpoint, hp_params
 from transformers.modeling_utils import PreTrainedModel, unwrap_model
 from transformers.pytorch_utils import is_torch_less_than_1_11
 from transformers.tokenization_utils_base import PreTrainedTokenizerBase