Skip to content

Commit

Permalink
Merge branch 'main' into fix-main-export
Browse files Browse the repository at this point in the history
  • Loading branch information
echarlaix committed Sep 28, 2023
2 parents 319eac3 + edd888b commit 23b9627
Show file tree
Hide file tree
Showing 4 changed files with 194 additions and 10 deletions.
106 changes: 106 additions & 0 deletions optimum/commands/export/openvino.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the command line for the export with OpenVINO."""

import sys
from pathlib import Path
from typing import TYPE_CHECKING, Optional

from ...exporters import TasksManager
from ..base import BaseOptimumCLICommand, CommandInfo


if TYPE_CHECKING:
from argparse import ArgumentParser, Namespace, _SubParsersAction


def parse_args_openvino(parser: "ArgumentParser"):
    """Register the OpenVINO export command-line arguments on *parser*.

    Arguments are split into a "Required" and an "Optional" group, mirroring
    the layout used by the other optimum-cli exporters.
    """
    required = parser.add_argument_group("Required arguments")
    required.add_argument(
        "-m",
        "--model",
        type=str,
        required=True,
        help="Model ID on huggingface.co or path on disk to load model from.",
    )
    required.add_argument(
        "output",
        type=Path,
        help="Path indicating the directory where to store the generated OV model.",
    )

    # The available-task list is rendered into the help text at import time.
    task_help = (
        "The task to export the model for. If not specified, the task will be auto-inferred based on the model. Available tasks depend on the model, but are among:"
        f" {str(TasksManager.get_all_tasks())}. For decoder models, use `xxx-with-past` to export the model using past key values in the decoder."
    )

    optional = parser.add_argument_group("Optional arguments")
    optional.add_argument("--task", default="auto", help=task_help)
    optional.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.")
    optional.add_argument(
        "--framework",
        type=str,
        choices=["pt", "tf"],
        default=None,
        help=(
            "The framework to use for the export. If not provided, will attempt to use the local checkpoint's original framework or what is available in the environment."
        ),
    )
    optional.add_argument(
        "--trust-remote-code",
        action="store_true",
        help=(
            "Allows to use custom code for the modeling hosted in the model repository. This option should only be set for repositories you trust and in which "
            "you have read the code, as it will execute on your local machine arbitrary code present in the model repository."
        ),
    )
    optional.add_argument(
        "--pad-token-id",
        type=int,
        default=None,
        help=(
            "This is needed by some models, for some tasks. If not provided, will attempt to use the tokenizer to guess it."
        ),
    )


class OVExportCommand(BaseOptimumCLICommand):
    """`optimum-cli export openvino` subcommand: exports a model to OpenVINO IR."""

    COMMAND = CommandInfo(name="openvino", help="Export PyTorch models to OpenVINO IR.")

    def __init__(
        self,
        subparsers: "_SubParsersAction",
        args: Optional["Namespace"] = None,
        command: Optional["CommandInfo"] = None,
        from_defaults_factory: bool = False,
        parser: Optional["ArgumentParser"] = None,
    ):
        super().__init__(
            subparsers,
            args=args,
            command=command,
            from_defaults_factory=from_defaults_factory,
            parser=parser,
        )
        # Raw command-line tail (everything after `optimum-cli export openvino`).
        self.args_string = " ".join(sys.argv[3:])

    @staticmethod
    def parse_args(parser: "ArgumentParser"):
        return parse_args_openvino(parser)

    def run(self):
        # Imported lazily so that loading the CLI itself does not require the
        # OpenVINO export machinery to be importable.
        from ...exporters.openvino.__main__ import main_export

        # TODO : add input shapes
        export_kwargs = {
            "model_name_or_path": self.args.model,
            "output": self.args.output,
            "task": self.args.task,
            "framework": self.args.framework,
            "cache_dir": self.args.cache_dir,
            "trust_remote_code": self.args.trust_remote_code,
            "pad_token_id": self.args.pad_token_id,
            # "input_shapes": ...,  # not forwarded yet, see TODO above
        }
        main_export(**export_kwargs)
19 changes: 19 additions & 0 deletions optimum/commands/register/register_openvino.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..export import ExportCommand
from ..export.openvino import OVExportCommand


# Pairs of (command, parent command): registers the OpenVINO export command as
# a subcommand of `optimum-cli export`.
REGISTER_COMMANDS = [(OVExportCommand, ExportCommand)]
59 changes: 59 additions & 0 deletions tests/openvino/test_exporters_cli.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import unittest
from tempfile import TemporaryDirectory

from parameterized import parameterized
from utils_tests import MODEL_NAMES

from optimum.exporters.openvino.__main__ import main_export


class OVCLIExportTestCase(unittest.TestCase):
    """
    Integration tests ensuring supported models are correctly exported.
    """

    # (task, model_type) pairs; model_type is a key of MODEL_NAMES.
    SUPPORTED_ARCHITECTURES = (
        ["causal-lm", "gpt2"],
        ["causal-lm-with-past", "gpt2"],
        ["seq2seq-lm", "t5"],
        ["seq2seq-lm-with-past", "t5"],
        ["sequence-classification", "bert"],
        ["question-answering", "distilbert"],
        ["masked-lm", "bert"],
        ["default", "blenderbot"],
        ["default-with-past", "blenderbot"],
        ["stable-diffusion", "stable-diffusion"],
        ["stable-diffusion-xl", "stable-diffusion-xl"],
        ["stable-diffusion-xl", "stable-diffusion-xl-refiner"],
    )

    def _openvino_export(self, model_name: str, task: str):
        """Export *model_name* for *task* into a temporary directory via the Python API."""
        with TemporaryDirectory() as tmpdir:
            main_export(model_name_or_path=model_name, output=tmpdir, task=task)

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_export(self, task: str, model_type: str):
        self._openvino_export(MODEL_NAMES[model_type], task)

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_exporters_cli(self, task: str, model_type: str):
        with TemporaryDirectory() as tmpdirname:
            # Pass an argument list (shell=False, the default) instead of a
            # shell-interpolated string: model names and temp paths are passed
            # verbatim, avoiding quoting issues and shell interpretation.
            subprocess.run(
                [
                    "optimum-cli",
                    "export",
                    "openvino",
                    "--model",
                    MODEL_NAMES[model_type],
                    "--task",
                    task,
                    tmpdirname,
                ],
                check=True,
            )
20 changes: 10 additions & 10 deletions tests/openvino/test_modeling.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,10 +121,12 @@ def test_load_from_hub_and_save_model(self):
del model
gc.collect()

def test_load_from_hub_and_save_decoder_model(self):
tokenizer = AutoTokenizer.from_pretrained(self.OV_DECODER_MODEL_ID)
@parameterized.expand((True, False))
def test_load_from_hub_and_save_decoder_model(self, use_cache):
model_id = "vuiseng9/ov-gpt2-fp32-kv-cache" if use_cache else "vuiseng9/ov-gpt2-fp32-no-cache"
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokens = tokenizer("This is a sample input", return_tensors="pt")
loaded_model = OVModelForCausalLM.from_pretrained(self.OV_DECODER_MODEL_ID, use_cache=True)
loaded_model = OVModelForCausalLM.from_pretrained(model_id, use_cache=use_cache)
self.assertIsInstance(loaded_model.config, PretrainedConfig)
loaded_model_outputs = loaded_model(**tokens)

Expand All @@ -133,7 +135,8 @@ def test_load_from_hub_and_save_decoder_model(self):
folder_contents = os.listdir(tmpdirname)
self.assertTrue(OV_XML_FILE_NAME in folder_contents)
self.assertTrue(OV_XML_FILE_NAME.replace(".xml", ".bin") in folder_contents)
model = OVModelForCausalLM.from_pretrained(tmpdirname, use_cache=True)
model = OVModelForCausalLM.from_pretrained(tmpdirname, use_cache=use_cache)
self.assertEqual(model.use_cache, use_cache)

outputs = model(**tokens)
self.assertTrue(torch.equal(loaded_model_outputs.logits, outputs.logits))
Expand Down Expand Up @@ -540,6 +543,7 @@ def test_compare_with_and_without_past_key_values(self):
)

model_without_pkv = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=False)

# Warmup
_ = model_without_pkv.generate(**tokens)
with Timer() as without_pkv_timer:
Expand Down Expand Up @@ -710,12 +714,8 @@ def test_timm_save_and_infer(self, model_id):
with tempfile.TemporaryDirectory() as tmpdirname:
model_save_path = os.path.join(tmpdirname, "timm_ov_model")
ov_model.save_pretrained(model_save_path)
new_ov_model = OVModelForImageClassification.from_pretrained(
model_save_path,
)
new_ov_model(
pixel_values=torch.zeros((5, 3, new_ov_model.config.image_size, new_ov_model.config.image_size))
)
model = OVModelForImageClassification.from_pretrained(model_save_path)
model(pixel_values=torch.zeros((5, 3, model.config.image_size, model.config.image_size)))
gc.collect()


Expand Down

0 comments on commit 23b9627

Please sign in to comment.