Bump transformers version (huggingface#622)
* Bump transformers version

* Add generation config

* update setup

* fix

* add _convert_tokens_to_ids
echarlaix authored Mar 22, 2024
1 parent 3879e4e commit 78b3d8c
Showing 3 changed files with 12 additions and 7 deletions.
optimum/intel/neural_compressor/modeling_base.py (3 additions, 0 deletions)

@@ -32,6 +32,7 @@
     AutoModelForSequenceClassification,
     AutoModelForTokenClassification,
     AutoModelForVision2Seq,
+    GenerationConfig,
     GenerationMixin,
     PretrainedConfig,
 )

@@ -83,6 +84,8 @@ def __init__(
         self._device = getattr(self.model, "device", None) or torch.device(
             "cuda:0" if torch.cuda.is_available() else "cpu"
         )
+        self.generation_config = GenerationConfig.from_model_config(config)
+
         # Registers the INCModelForXXX classes into the transformers AutoModel classes to avoid warnings when creating
         # a pipeline https://github.com/huggingface/transformers/blob/cad61b68396a1a387287a8e2e2fef78a25b79383/src/transformers/pipelines/base.py#L863
         AutoConfig.register(self.base_model_prefix, AutoConfig)
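For reference, a minimal sketch of what the added line provides, using a placeholder model id that is not taken from this repository: GenerationConfig.from_model_config() copies generation-related defaults out of a model config, so the INC model exposes a generation_config like a regular transformers model.

from transformers import AutoConfig, GenerationConfig

# Placeholder model id; any config with generation-related fields works here.
config = AutoConfig.from_pretrained("gpt2")
generation_config = GenerationConfig.from_model_config(config)

# Defaults such as bos_token_id / eos_token_id now come from the model config
# instead of being left unset on the wrapped model.
print(generation_config.bos_token_id, generation_config.eos_token_id)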
setup.py (2 additions, 2 deletions)

@@ -28,7 +28,7 @@
 
 INSTALL_REQUIRE = [
     "torch>=1.11",
-    "transformers>=4.36.0,<4.39.0",
+    "transformers>=4.36.0,<4.40.0",
     "optimum @ git+https://github.com/huggingface/optimum.git#egg=optimum",
     "datasets>=1.4.0",
     "sentencepiece",

@@ -61,7 +61,7 @@
     "openvino": ["openvino>=2023.3", "nncf>=2.8.1"],
     "openvino-tokenizers": ["openvino-tokenizers[transformers]"],
     "nncf": ["nncf>=2.8.1"],
-    "ipex": ["intel-extension-for-pytorch"],
+    "ipex": ["intel-extension-for-pytorch", "transformers>=4.36.0,<4.39.0"],
     "diffusers": ["diffusers"],
     "quality": QUALITY_REQUIRE,
     "tests": TESTS_REQUIRE,
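The base requirement now accepts transformers 4.39.x, while the ipex extra keeps the tighter <4.39.0 pin. A small sketch of checking an installed version against the new base range; it assumes the packaging package is available in the environment:

from importlib.metadata import version
from packaging.version import Version  # assumed available alongside pip/setuptools

installed = Version(version("transformers"))
# Mirrors the updated base pin: transformers>=4.36.0,<4.40.0
assert Version("4.36.0") <= installed < Version("4.40.0"), f"unsupported transformers {installed}"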
tests/openvino/test_modeling.py (7 additions, 5 deletions)

@@ -501,7 +501,7 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
         "qwen",
         "qwen2",
         "stablelm",
-        # "starcoder2", # TODO: enable with next transformers release
+        "starcoder2",
         "phi",
     )
     GENERATION_LENGTH = 100

@@ -525,10 +525,8 @@ def test_compare_to_transformers(self, model_arch):
 
         model_kwargs = {}
         if model_arch in self.REMOTE_CODE_MODELS:
-            model_kwargs = {
-                "config": AutoConfig.from_pretrained(model_id, trust_remote_code=True),
-                "trust_remote_code": True,
-            }
+            model_kwargs = {"trust_remote_code": True}
+
         ov_model = OVModelForCausalLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG, **model_kwargs)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         self.assertTrue(ov_model.use_cache)

@@ -572,6 +570,10 @@ def test_pipeline(self, model_arch):
                 "trust_remote_code": True,
             }
         tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS)
+
+        if model_arch == "qwen":
+            tokenizer._convert_tokens_to_ids = lambda x: 0
+
         model = OVModelForCausalLM.from_pretrained(
             model_id, export=True, use_cache=False, compile=False, **model_kwargs
         )
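A minimal sketch of the loading pattern the updated tests exercise; the model id is a placeholder rather than one of the test-suite checkpoints, and the commented line shows the qwen-specific tokenizer workaround added above:

from transformers import AutoTokenizer
from optimum.intel import OVModelForCausalLM

model_id = "hf-internal-testing/tiny-random-gpt2"  # placeholder test checkpoint
model_kwargs = {"trust_remote_code": False}  # set True only for remote-code architectures

tokenizer = AutoTokenizer.from_pretrained(model_id)
# For the "qwen" architecture the test stubs out token-to-id conversion:
# tokenizer._convert_tokens_to_ids = lambda x: 0

model = OVModelForCausalLM.from_pretrained(model_id, export=True, **model_kwargs)
inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))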
