Fix Pytorch integration test postmerge test (#363)
* Extend LD_LIBRARY_PATH with pytorch backend path

* Add skipif for GPU in test_pytorch_backend

* Extend match_representations type coercion to support torch tensors

* Run gpu-postmerge on pull request temporarily

* Use `name` instead of `to` for numpy/cupy dtype conversion

* Remove pull_request from postmerge-gpu workflow
oliverholworthy authored Jun 8, 2023
1 parent ed7009c commit 399ef23
Showing 3 changed files with 30 additions and 4 deletions.
merlin/systems/triton/conversions.py (27 additions, 3 deletions)
@@ -105,9 +105,9 @@ def match_representations(schema: Schema, dict_array: Dict[str, Any]) -> Dict[st
             aligned[offs_name] = offsets

             if dtype != md.unknown:
-                aligned[vals_name] = aligned[vals_name].astype(dtype.to_numpy)
+                aligned[vals_name] = _astype(aligned[vals_name], dtype)

-            aligned[offs_name] = aligned[offs_name].astype("int32")
+            aligned[offs_name] = _astype(aligned[offs_name], md.dtype("int32"))
         else:
             try:
                 # Look for values and offsets that already exist,
@@ -120,7 +120,7 @@ def match_representations(schema: Schema, dict_array: Dict[str, Any]) -> Dict[st
                 aligned[col_name] = dict_array[col_name]

             if dtype != md.unknown:
-                aligned[col_name] = aligned[col_name].astype(dtype.to_numpy)
+                aligned[col_name] = _astype(aligned[col_name], dtype)

     return aligned

@@ -139,6 +139,30 @@ def _from_values_offsets(values, offsets, shape):
     return values.reshape(new_shape)


+@singledispatch
+def _astype(value, target_dtype):
+    raise NotImplementedError(f"_to_dtype not implemented for {type(value)}")
+
+
+@_astype.register
+def _(array: np.ndarray, target_dtype: md.DType):
+    return array.astype(target_dtype.name)
+
+
+if cp:
+
+    @_astype.register
+    def _(array: cp.ndarray, target_dtype: md.DType):
+        return array.astype(target_dtype.name)
+
+
+if torch:
+
+    @_astype.register
+    def _(tensor: torch.Tensor, target_dtype: md.DType):
+        return tensor.to(target_dtype.to("torch"))
+
+
 @singledispatch
 def _to_values_offsets(values):
     """Convert array to values/offsets representation
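For context on the helper added above: functools.singledispatch picks a conversion based on the runtime type of the value, and the cupy and torch overloads are registered only when those libraries are importable, so the module still loads in environments without them. Below is a minimal, self-contained sketch of the same pattern, not the module's actual code; the try/except torch import, the astype name, and the example values are illustrative assumptions, while the merlin.dtypes calls (md.dtype, .name, .to("torch")) mirror the ones in the diff.

from functools import singledispatch

import numpy as np
import merlin.dtypes as md  # assumed import path for the `md` alias used in conversions.py

try:
    import torch
except ImportError:
    torch = None


@singledispatch
def astype(value, target_dtype: md.DType):
    # Fallback for unsupported container types
    raise NotImplementedError(f"astype not implemented for {type(value)}")


@astype.register
def _(array: np.ndarray, target_dtype: md.DType):
    # numpy (and cupy) accept the dtype's string name, e.g. "int32"
    return array.astype(target_dtype.name)


if torch:
    # Registered only when torch imports, so torch-free environments still work
    @astype.register
    def _(tensor: torch.Tensor, target_dtype: md.DType):
        # torch wants its own dtype objects, e.g. torch.int32
        return tensor.to(target_dtype.to("torch"))


print(astype(np.array([1.0, 2.0]), md.dtype("int32")).dtype)  # int32
if torch:
    print(astype(torch.tensor([1.0, 2.0]), md.dtype("int32")).dtype)  # torch.int32
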
tests/integration/t4r/test_pytorch_backend.py (2 additions, 0 deletions)
@@ -29,6 +29,7 @@
 tritonclient = pytest.importorskip("tritonclient")
 grpcclient = pytest.importorskip("tritonclient.grpc")

+from merlin.core.compat import HAS_GPU  # noqa
 from merlin.core.dispatch import make_df  # noqa
 from merlin.systems.dag import Ensemble  # noqa
 from merlin.systems.dag.ops.pytorch import PredictPyTorch  # noqa
@@ -39,6 +40,7 @@


 @pytest.mark.skipif(not TRITON_SERVER_PATH, reason="triton server not found")
+@pytest.mark.skipif(not HAS_GPU, reason="GPU Device required for test")
 def test_serve_t4r_with_torchscript(tmpdir):
     # ===========================================
     # Generate training data
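The new marker stacks a second guard on the existing one, so the test is collected but skipped unless both a Triton server binary and a GPU are available. A minimal sketch of the same gating, with a stand-in for the test module's TRITON_SERVER_PATH constant (the shutil.which lookup is illustrative, not how the real test defines it):

import shutil

import pytest

from merlin.core.compat import HAS_GPU

# Stand-in for the test module's TRITON_SERVER_PATH constant (illustrative only)
TRITON_SERVER_PATH = shutil.which("tritonserver")


@pytest.mark.skipif(not TRITON_SERVER_PATH, reason="triton server not found")
@pytest.mark.skipif(not HAS_GPU, reason="GPU Device required for test")
def test_requires_gpu_and_triton():
    # The body only runs when both guards pass
    assert HAS_GPU and TRITON_SERVER_PATH
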
tox.ini (1 addition, 1 deletion)
@@ -73,7 +73,7 @@ sitepackages=true
 ; need to add some back.
 setenv =
     TF_GPU_ALLOCATOR=cuda_malloc_async
-    LD_LIBRARY_PATH=/opt/tritonserver/backends/pytorch
+    LD_LIBRARY_PATH=/opt/tritonserver/backends/pytorch:{env:LD_LIBRARY_PATH}
 passenv =
     OPAL_PREFIX
 deps =
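The tox change prepends the Triton PyTorch backend directory rather than overwriting LD_LIBRARY_PATH outright, so shared libraries already on the path stay reachable. A rough Python equivalent of the value the setenv line produces (a sketch only; tox performs this substitution itself, and the empty-variable fallback here is this sketch's own handling):

import os

backend_dir = "/opt/tritonserver/backends/pytorch"
existing = os.environ.get("LD_LIBRARY_PATH", "")

# Mirrors LD_LIBRARY_PATH=/opt/tritonserver/backends/pytorch:{env:LD_LIBRARY_PATH}:
# the backend directory comes first, followed by whatever was already set.
os.environ["LD_LIBRARY_PATH"] = f"{backend_dir}:{existing}" if existing else backend_dir
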
