run hf_olmo tests too
AkshitaB committed Dec 11, 2023
1 parent 4609f13 · commit 0c8bcd5
Showing 2 changed files with 13 additions and 1 deletion.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
@@ -151,7 +151,7 @@ jobs:
value: ":16:8"
- name: TOKENIZERS_PARALLELISM
value: "false"
- command: ["/entrypoint.sh", "pytest", "-v", "-m", "gpu", "tests/", "-k", "not hf_olmo"]
+ command: ["/entrypoint.sh", "pytest", "-v", "-m", "gpu", "tests/"]
result:
path: /unused
token: ${{ env.BEAKER_TOKEN }}
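Context for the change above: `-m gpu` restricts the run to tests tagged with the `gpu` marker, and dropping the `-k "not hf_olmo"` expression stops excluding tests whose node IDs mention hf_olmo, so the hf_olmo GPU tests now run in CI as well. A minimal sketch of how a custom marker like `gpu` can be registered, assuming a conftest.py (hypothetical here; the OLMo repo may declare it in pyproject.toml or setup.cfg instead):

# conftest.py (illustrative sketch, not the repo's actual file)
def pytest_configure(config):
    # Register the custom "gpu" marker so `pytest -m gpu` can select GPU tests
    # without triggering PytestUnknownMarkWarning.
    config.addinivalue_line("markers", "gpu: marks tests that require a CUDA device")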
12 changes: 12 additions & 0 deletions tests/hf_olmo/modeling_olmo_test.py
@@ -1,5 +1,6 @@
import tempfile

+import pytest
import torch

from olmo.model import Olmo
@@ -42,3 +43,14 @@ def test_save_pretrained(model_path: str):
    saved_hf_output = saved_hf_model(input_tensor)

    torch.testing.assert_allclose(saved_hf_output.logits, hf_output.logits)


+@pytest.mark.gpu
+@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Requires CUDA devices")
+def test_auto_device_map_load(model_path: str):
+    from transformers import AutoModelForCausalLM
+
+    from hf_olmo import OLMoForCausalLM, OLMoTokenizerFast  # noqa: F401
+
+    hf_model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")
+    assert hf_model.device.type == "cuda"
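
The added test exercises `device_map="auto"`, which has accelerate place the model weights on available devices at load time; the seemingly unused `OLMoForCausalLM`/`OLMoTokenizerFast` import is kept because its side effect registers the OLMo architecture with the `Auto*` classes. A minimal standalone sketch of the same loading pattern, assuming a hypothetical local checkpoint path:

from transformers import AutoModelForCausalLM

from hf_olmo import OLMoForCausalLM, OLMoTokenizerFast  # noqa: F401  # registers OLMo with the Auto* classes

# "./olmo-checkpoint" is a placeholder path, not one taken from the commit.
model = AutoModelForCausalLM.from_pretrained("./olmo-checkpoint", device_map="auto")

# With a single GPU the whole model should land on cuda:0; with several GPUs
# accelerate may shard it, and hf_device_map then records per-module placement.
print(model.device)
print(getattr(model, "hf_device_map", None))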
