TST test_mixed_adapter_batches_lora_opt_timing on XPU (huggingface#2021)
faaany authored Aug 21, 2024
1 parent 6c832c1 commit fa218e1
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions tests/test_custom_models.py
@@ -52,7 +52,7 @@
from peft.utils import ModulesToSaveWrapper, infer_device

from .testing_common import PeftCommonTester
-from .testing_utils import get_state_dict, require_torch_gpu
+from .testing_utils import get_state_dict, require_non_cpu


# MLP is a vanilla FF network with only linear layers
@@ -3276,7 +3276,7 @@ def test_mixed_adapter_batches_lora_with_dora_raises(self):
        with pytest.raises(ValueError, match=msg):
            peft_model.forward(**inputs)

-    @require_torch_gpu
+    @require_non_cpu
    def test_mixed_adapter_batches_lora_opt_timing(self):
        # Use a more realistic model (opt-125m) and do a simple runtime check to ensure that mixed adapter batches
        # don't add too much overhead. These types of tests are inherently flaky, so we try to add in some robustness.
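For context, the point of the swap is that `require_non_cpu` skips a test only when no accelerator is available at all, rather than requiring CUDA specifically, which is what lets this timing test run on Intel XPU. Below is a minimal sketch of what such a decorator could look like, assuming it builds on `infer_device` from `peft.utils` (already imported in this file); the actual implementation in `tests/testing_utils.py` may differ:

```python
# Hypothetical sketch -- the real require_non_cpu in tests/testing_utils.py
# may be implemented differently.
import pytest

from peft.utils import infer_device


def require_non_cpu(test_case):
    """Skip `test_case` unless an accelerator (CUDA, XPU, MPS, ...) is available.

    Unlike a CUDA-only guard such as require_torch_gpu, this accepts any
    non-CPU device reported by infer_device().
    """
    return pytest.mark.skipif(
        infer_device() == "cpu", reason="test requires a non-CPU device"
    )(test_case)
```

Applied as `@require_non_cpu`, the decorator keeps the test enabled on any accelerator backend instead of silently skipping it everywhere except CUDA.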
