Merge remote-tracking branch 'origin/main' into ak/sentence_transformers_notebook

AlexKoff88 committed Oct 21, 2024
2 parents 6de610c + f7b5b54, commit 27e7493
Showing 14 changed files with 253 additions and 353 deletions.
.github/workflows/build_pr_documentation.yml (2 additions, 0 deletions)

```diff
@@ -30,6 +30,8 @@ jobs:
       - name: Setup environment
         run: |
+          python -m venv venv-doc
+          source venv-doc/bin/activate
           pip uninstall -y doc-builder
           cd doc-builder
           git pull origin main
```
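Creating and activating a dedicated virtual environment (`venv-doc`) before the `pip uninstall`/reinstall presumably keeps the doc-builder setup isolated from the runner's system Python, so it cannot disturb preinstalled packages.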
.github/workflows/test_openvino.yml (1 addition, 1 deletion)

```diff
@@ -21,7 +21,7 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ["3.8", "3.12"]
-        transformers-version: ["4.36.0", "4.44.*"]
+        transformers-version: ["4.36.0", "4.45.*"]
         os: [ubuntu-latest]
 
     runs-on: ${{ matrix.os }}
```
.github/workflows/test_openvino_basic.yml (1 addition, 0 deletions)

```diff
@@ -77,3 +77,4 @@ jobs:
           pytest tests/openvino -s -m "run_slow" --durations=0
         env:
           RUN_SLOW: 1
+          HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
```
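As an illustration (not part of this commit) of how a slow test could consume the token the workflow now exposes; the test name and the gated model id below are hypothetical:

```python
import os

import pytest
from huggingface_hub import model_info

RUN_SLOW = os.environ.get("RUN_SLOW") == "1"


@pytest.mark.skipif(not RUN_SLOW, reason="slow test; set RUN_SLOW=1 to enable")
def test_reads_gated_model_with_token():
    # HF_HUB_READ_TOKEN is injected by the workflow from repository secrets.
    token = os.environ.get("HF_HUB_READ_TOKEN")
    # Passing the token lets the hub client resolve gated repositories.
    info = model_info("meta-llama/Llama-2-7b-hf", token=token)  # hypothetical gated repo
    assert info.id is not None
```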
.github/workflows/test_openvino_notebooks.yml (1 addition, 1 deletion)

```diff
@@ -39,7 +39,7 @@ jobs:
           # Install PyTorch CPU to prevent unnecessary downloading/installing of CUDA packages
           # ffmpeg, torchaudio and pillow are required for image classification and audio classification pipelines
           sudo apt-get install ffmpeg
-          pip install torch torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
+          pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
           pip install -r notebooks/openvino/requirements.txt
           pip install .[tests,openvino] nbval
```
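Switching from `--extra-index-url` to `--index-url` makes the PyTorch CPU wheel index the sole source for this install, so pip can no longer fall back to the default CUDA-enabled wheels on PyPI; `torchvision` is added alongside, presumably for the image-pipeline notebooks mentioned in the comment above.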
optimum/exporters/openvino/convert.py (7 additions, 0 deletions)

```diff
@@ -41,6 +41,7 @@
     _torch_version,
     _transformers_version,
     compare_versions,
+    is_tokenizers_version,
     is_transformers_version,
 )
 from optimum.utils import DEFAULT_DUMMY_SHAPES, is_diffusers_available
@@ -730,6 +731,12 @@ def export_tokenizer(
     except ModuleNotFoundError:
         return
 
+    if is_tokenizers_version(">", "0.19"):
+        logger.warning(
+            "Exporting tokenizers to OpenVINO is not supported for tokenizers version > 0.19. "
+            "Please downgrade to tokenizers version <= 0.19 to export tokenizers to OpenVINO."
+        )
+
     if not isinstance(output, Path):
         output = Path(output)
```
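For context, a version-gate helper like `is_tokenizers_version` is typically a thin wrapper over `packaging.version`; the sketch below is an illustration under that assumption, not the actual optimum implementation:

```python
import operator

from packaging import version


def is_tokenizers_version(op: str, ref: str) -> bool:
    """Compare the installed tokenizers version against `ref` using operator `op`."""
    import tokenizers  # imported lazily so the check only runs when tokenizers is present

    ops = {"<": operator.lt, "<=": operator.le, ">": operator.gt,
           ">=": operator.ge, "==": operator.eq, "!=": operator.ne}
    return ops[op](version.parse(tokenizers.__version__), version.parse(ref))
```

With, say, tokenizers 0.20.x installed, `is_tokenizers_version(">", "0.19")` evaluates to True, so `export_tokenizer` logs the warning above.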
optimum/exporters/openvino/model_configs.py (49 additions, 0 deletions)

```diff
@@ -24,6 +24,8 @@
 from optimum.exporters.onnx.model_configs import (
     CLIPOnnxConfig,
     CLIPTextOnnxConfig,
+    CLIPTextWithProjectionOnnxConfig,
+    CLIPVisionModelOnnxConfig,
     CodeGenOnnxConfig,
     FalconOnnxConfig,
     GemmaOnnxConfig,
@@ -35,6 +37,7 @@
     PhiOnnxConfig,
     VisionOnnxConfig,
 )
+from optimum.exporters.onnx.model_patcher import ModelPatcher
 from optimum.exporters.tasks import TasksManager
 from optimum.utils import DEFAULT_DUMMY_SHAPES
 from optimum.utils.input_generators import (
@@ -1079,6 +1082,11 @@ def generate_dummy_inputs_for_validation(
         reference_model_inputs["text"] = reference_model_inputs.pop("input_ids")
         return super().generate_dummy_inputs_for_validation(reference_model_inputs)
 
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> ModelPatcher:
+        return ModelPatcher(self, model, model_kwargs=model_kwargs)
+
 
 @register_in_tasks_manager("clip-text-model", *["feature-extraction"], library_name="open_clip")
 class OpenCLIPTextOpenVINOConfig(CLIPTextOnnxConfig):
@@ -1109,6 +1117,11 @@ def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
         dummy_inputs = super().generate_dummy_inputs(framework=framework, **kwargs)
         return dummy_inputs
 
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> ModelPatcher:
+        return ModelPatcher(self, model, model_kwargs=model_kwargs)
+
 
 @register_in_tasks_manager("clip-vision-model", *["feature-extraction"], library_name="open_clip")
 class OpenCLIPVisualOpenVINOConfig(VisionOnnxConfig):
@@ -1134,6 +1147,42 @@ def rename_ambiguous_inputs(self, inputs):
         return model_inputs
 
 
+@register_in_tasks_manager(
+    "clip", *["feature-extraction", "zero-shot-image-classification"], library_name="transformers"
+)
+class CLIPOpenVINOConfig(CLIPOnnxConfig):
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> ModelPatcher:
+        return ModelPatcher(self, model, model_kwargs=model_kwargs)
+
+
+@register_in_tasks_manager("clip-text-model", *["feature-extraction"], library_name="transformers")
+@register_in_tasks_manager("clip-text-model", *["feature-extraction"], library_name="diffusers")
+class CLIPTextOpenVINOConfig(CLIPTextOnnxConfig):
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> ModelPatcher:
+        return ModelPatcher(self, model, model_kwargs=model_kwargs)
+
+
+@register_in_tasks_manager("clip-text-with-projection", *["feature-extraction"], library_name="transformers")
+@register_in_tasks_manager("clip-text-with-projection", *["feature-extraction"], library_name="diffusers")
+class CLIPTextWithProjectionOpenVINOConfig(CLIPTextWithProjectionOnnxConfig):
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> ModelPatcher:
+        return ModelPatcher(self, model, model_kwargs=model_kwargs)
+
+
+@register_in_tasks_manager("clip-vision-model", *["feature-extraction"], library_name="transformers")
+class CLIPVisionModelOpenVINOConfig(CLIPVisionModelOnnxConfig):
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> ModelPatcher:
+        return ModelPatcher(self, model, model_kwargs=model_kwargs)
+
+
 @register_in_tasks_manager(
     "ibert",
     *[
```
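All of the new `transformers`/`diffusers` CLIP configs override `patch_model_for_export` to return the base `ModelPatcher` rather than whatever patcher the parent ONNX config would supply, presumably opting the OpenVINO export out of ONNX-specific patches. A minimal sketch of how such a patcher is consumed, assuming the usual context-manager protocol, with `run_openvino_export` as a hypothetical stand-in for the real conversion step:

```python
from optimum.exporters.onnx.model_patcher import ModelPatcher


def export_with_patching(config, model, output_path):
    # The export config decides which patcher to use; the CLIP configs above
    # return a plain ModelPatcher that applies no extra patches.
    patcher = config.patch_model_for_export(model, model_kwargs=None)
    with patcher:  # patches the model on enter, restores it on exit
        run_openvino_export(model, output_path)  # hypothetical conversion step


def run_openvino_export(model, output_path):
    ...  # placeholder: convert the (patched) model to OpenVINO IR
```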
(8 more changed files not shown.)
