1 parent 20f864e commit 402d703
examples/distributed_inference/tensor_parallel_llama3.py
@@ -24,11 +24,11 @@
 import torch_tensorrt
 from torch_tensorrt.dynamo.distributed.utils import (
     get_tensor_parallel_device_mesh,
-    initialize_logger,
+    initialize_distributed_logger,
 )

 device_mesh, _world_size, _rank = get_tensor_parallel_device_mesh()
-logger = initialize_logger(_rank, "tensor_parallel_llama3")
+logger = initialize_distributed_logger(_rank, "tensor_parallel_llama3")

 logger.info(f"Starting PyTorch TP example on rank {_rank}.")
 assert (
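
For context, here is a minimal sketch of what a per-rank helper like initialize_distributed_logger could look like, inferred only from the call signature visible in this diff; the actual torch_tensorrt.dynamo.distributed.utils implementation may differ.

import logging

def initialize_distributed_logger(rank: int, name: str) -> logging.Logger:
    # Hypothetical sketch: build a logger whose records are tagged with the
    # distributed rank, so interleaved output from multiple processes stays
    # attributable.
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter(f"[rank {rank}] %(asctime)s %(name)s: %(message)s")
    )
    logger.addHandler(handler)
    return logger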