diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py
index 14ac76137f..59fc89649a 100644
--- a/optimum/intel/openvino/modeling_base.py
+++ b/optimum/intel/openvino/modeling_base.py
@@ -65,7 +65,6 @@ class PreTrainedModel(OptimizedModel):
     """,
 )
 class OVBaseModel(PreTrainedModel):
-    _AUTOMODELS_TO_TASKS = {cls_name: task for task, cls_name in TasksManager._TASKS_TO_AUTOMODELS.items()}
     auto_model_class = None
     export_feature = None
 
@@ -391,13 +390,6 @@ def _ensure_supported_device(self, device: str = None):
     def forward(self, *args, **kwargs):
         raise NotImplementedError
 
-    @classmethod
-    def _auto_model_to_task(cls, auto_model_class):
-        """
-        Get the task corresponding to a class (for example AutoModelForXXX in transformers).
-        """
-        return cls._AUTOMODELS_TO_TASKS[auto_model_class.__name__]
-
     def can_generate(self) -> bool:
         """
         Returns whether this model can generate sequences with `.generate()`.
diff --git a/tests/openvino/test_stable_diffusion.py b/tests/openvino/test_stable_diffusion.py
index 3c0c90475a..e04e2d6fd3 100644
--- a/tests/openvino/test_stable_diffusion.py
+++ b/tests/openvino/test_stable_diffusion.py
@@ -121,7 +121,6 @@ def callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
         self.assertEqual(callback_fn.number_of_steps, inputs["num_inference_steps"])
 
     @parameterized.expand(SUPPORTED_ARCHITECTURES)
-    @require_diffusers
     def test_shape(self, model_arch: str):
         height, width, batch_size = 128, 64, 1
         pipeline = self.MODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch], export=True)