Skip to content

Commit faf92bd

Browse files
committed
Addressing review comments — making dynamic=None in torch.compile and changing the option description
1 parent ddf72cd commit faf92bd

File tree

4 files changed

+5
-6
lines changed

4 files changed

+5
-6
lines changed

examples/distributed_inference/tensor_parallel_simple_example.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ def compile_tp_model(tp_model, backend):
3131

3232
try:
3333
return torch.compile(
34-
tp_model, backend=backend, options=compile_options, dynamic=False
34+
tp_model, backend=backend, options=compile_options, dynamic=None
3535
)
3636
except RuntimeError as e:
3737
if (
@@ -43,7 +43,7 @@ def compile_tp_model(tp_model, backend):
4343
)
4444
compile_options["use_distributed_mode_trace"] = True
4545
return torch.compile(
46-
tp_model, backend=backend, options=compile_options, dynamic=False
46+
tp_model, backend=backend, options=compile_options, dynamic=None
4747
)
4848
else:
4949
logger.debug("The distributed model fails with the following error")

py/torch_tensorrt/dynamo/_settings.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ class CompilationSettings:
9292
enable_weight_streaming (bool): Enable weight streaming.
9393
enable_cross_compile_for_windows (bool): By default this is False means TensorRT engines can only be executed on the same platform where they were built.
9494
True will enable cross-platform compatibility which allows the engine to be built on Linux and run on Windows
95-
use_distributed_mode_trace (bool): Using aot_autograd to trace the graph. Enable this only if the model includes distributed operations
95+
use_distributed_mode_trace (bool): Use aot_autograd to trace the graph. This is enabled when DTensors are present in the distributed model
9696
"""
9797

9898
enabled_precisions: Set[dtype] = field(default_factory=lambda: ENABLED_PRECISIONS)

tests/py/dynamo/distributed/test_distributed_simple_example.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ def compile_tp_model(tp_model, backend):
2727

2828
try:
2929
return torch.compile(
30-
tp_model, backend=backend, options=compile_options, dynamic=False
30+
tp_model, backend=backend, options=compile_options, dynamic=None
3131
)
3232
except RuntimeError as e:
3333
if (
@@ -39,7 +39,7 @@ def compile_tp_model(tp_model, backend):
3939
)
4040
compile_options["use_distributed_mode_trace"] = True
4141
return torch.compile(
42-
tp_model, backend=backend, options=compile_options, dynamic=False
42+
tp_model, backend=backend, options=compile_options, dynamic=None
4343
)
4444
else:
4545
logger.debug("The distributed model fails with the following error")

tests/py/dynamo/distributed/test_nccl_ops.sh

-1
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,6 @@ fi
8888
URL="https://pypi.nvidia.com/tensorrt-llm/$FILE"
8989
echo "Downloading $FILE from $URL..."
9090
91-
echo "Downloading here...."
9291
#Installing wget
9392
ensure_installed wget
9493

0 commit comments

Comments
 (0)