diff --git a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
index 9ee84643c9..ecc7fc57e9 100644
--- a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
+++ b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
@@ -10,7 +10,7 @@ from torch.fx.node import Argument, Node, Target
 from torch_tensorrt import ENABLED_FEATURES
 from torch_tensorrt._features import needs_not_tensorrt_rtx
-from torch_tensorrt._utils import is_tensorrt_version_supported, is_thor
+from torch_tensorrt._utils import is_tensorrt_version_supported
 from torch_tensorrt.dynamo._settings import CompilationSettings
 from torch_tensorrt.dynamo._SourceIR import SourceIR
 from torch_tensorrt.dynamo.conversion import impl
@@ -429,7 +429,7 @@ def index_nonbool_validator(
     node: Node, settings: Optional[CompilationSettings] = None
 ) -> bool:
     # for thor and tensorrt_rtx, we don't support boolean indices, due to nonzero op not supported
-    if is_thor() or ENABLED_FEATURES.tensorrt_rtx:
+    if ENABLED_FEATURES.tensorrt_rtx:
         index = node.args[1]
         for ind in index:
             if ind is not None:
@@ -3621,18 +3621,10 @@ def aten_ops_full(
     )


-def nonzero_validator(
-    node: Node, settings: Optional[CompilationSettings] = None
-) -> bool:
-    return not is_thor()
-
-
 # currently nonzero is not supported for tensorrt_rtx
 # TODO: lan to add the nonzero support once tensorrt_rtx team has added the support
-# TODO: apbose to remove the capability validator once thor bug resolve in NGC
 @dynamo_tensorrt_converter(
     torch.ops.aten.nonzero.default,
-    capability_validator=nonzero_validator,
     supports_dynamic_shapes=True,
     requires_output_allocator=True,
 )
diff --git a/tests/py/dynamo/conversion/test_arange_aten.py b/tests/py/dynamo/conversion/test_arange_aten.py
index 968611a4ec..7705590e2e 100644
--- a/tests/py/dynamo/conversion/test_arange_aten.py
+++ b/tests/py/dynamo/conversion/test_arange_aten.py
@@ -5,15 +5,10 @@ import torch_tensorrt
 from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests
-from torch_tensorrt._utils import is_tegra_platform, is_thor

 from .harness import DispatchTestCase


-@unittest.skipIf(
-    is_thor() or is_tegra_platform(),
-    "Skipped on Thor and Tegra platforms",
-)
 class TestArangeConverter(DispatchTestCase):
     @parameterized.expand(
         [
diff --git a/tests/py/dynamo/conversion/test_cumsum_aten.py b/tests/py/dynamo/conversion/test_cumsum_aten.py
index a677212cb1..c405d0e2f0 100644
--- a/tests/py/dynamo/conversion/test_cumsum_aten.py
+++ b/tests/py/dynamo/conversion/test_cumsum_aten.py
@@ -5,15 +5,10 @@ import torch_tensorrt
 from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests
-from torch_tensorrt._utils import is_tegra_platform, is_thor

 from .harness import DispatchTestCase


-@unittest.skipIf(
-    is_thor() or is_tegra_platform(),
-    "Skipped on Thor and Tegra platforms",
-)
 class TestCumsumConverter(DispatchTestCase):
     @parameterized.expand(
         [
diff --git a/tests/py/dynamo/conversion/test_index_aten.py b/tests/py/dynamo/conversion/test_index_aten.py
index e34dc48dd5..6ed5f911f2 100644
--- a/tests/py/dynamo/conversion/test_index_aten.py
+++ b/tests/py/dynamo/conversion/test_index_aten.py
@@ -6,7 +6,6 @@ from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests
 from torch_tensorrt import ENABLED_FEATURES, Input
-from torch_tensorrt._utils import is_tegra_platform, is_thor

 from .harness import DispatchTestCase

@@ -114,8 +113,8 @@ def forward(self, input):
         ]
     )
     @unittest.skipIf(
-        is_thor() or ENABLED_FEATURES.tensorrt_rtx,
-        "Skipped on Thor or tensorrt_rtx due to nonzero not supported",
+        ENABLED_FEATURES.tensorrt_rtx,
+        "Skipped on tensorrt_rtx due to nonzero not supported",
     )
     def test_index_constant_bool_mask(self, _, index, input):
         class TestModule(torch.nn.Module):
@@ -149,8 +148,8 @@ def forward(self, x, index0):
         )

     @unittest.skipIf(
-        is_thor() or ENABLED_FEATURES.tensorrt_rtx,
-        "Skipped on Thor or tensorrt_rtx due to nonzero not supported",
+        ENABLED_FEATURES.tensorrt_rtx,
+        "Skipped on tensorrt_rtx due to nonzero not supported",
     )
     def test_index_zero_two_dim_ITensor_mask(self):
         class TestModule(nn.Module):
@@ -163,10 +162,6 @@ def forward(self, x, index0):
         index0 = torch.tensor([True, False])
         self.run_test(TestModule(), [input, index0], enable_passes=True)

-    @unittest.skipIf(
-        is_thor(),
-        "Skipped on Thor due to nonzero not supported",
-    )
     def test_index_zero_index_three_dim_ITensor(self):
         class TestModule(nn.Module):
             def forward(self, x, index0):
@@ -180,8 +175,8 @@ def forward(self, x, index0):
         self.run_test(TestModule(), [input, index0])

     @unittest.skipIf(
-        is_thor() or ENABLED_FEATURES.tensorrt_rtx,
-        "Skipped on Thor or tensorrt_rtx due to nonzero not supported",
+        ENABLED_FEATURES.tensorrt_rtx,
+        "Skipped on tensorrt_rtx due to nonzero not supported",
     )
     def test_index_zero_index_three_dim_mask_ITensor(self):
         class TestModule(nn.Module):
@@ -252,7 +247,7 @@ def forward(self, input):


 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
     "nonzero is not supported for tensorrt_rtx",
 )
 class TestIndexDynamicInputNonDynamicIndexConverter(DispatchTestCase):
diff --git a/tests/py/dynamo/conversion/test_nonzero_aten.py b/tests/py/dynamo/conversion/test_nonzero_aten.py
index 641cc7c098..b81644ed54 100644
--- a/tests/py/dynamo/conversion/test_nonzero_aten.py
+++ b/tests/py/dynamo/conversion/test_nonzero_aten.py
@@ -6,13 +6,12 @@ from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests
 from torch_tensorrt import Input
-from torch_tensorrt._utils import is_tegra_platform, is_thor

 from .harness import DispatchTestCase


 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
     "nonzero is not supported for tensorrt_rtx",
 )
 class TestNonZeroConverter(DispatchTestCase):
diff --git a/tests/py/dynamo/conversion/test_sym_size.py b/tests/py/dynamo/conversion/test_sym_size.py
index 770dd75504..968ecd322b 100644
--- a/tests/py/dynamo/conversion/test_sym_size.py
+++ b/tests/py/dynamo/conversion/test_sym_size.py
@@ -4,15 +4,10 @@ import torch.nn as nn
 from parameterized import parameterized
 from torch.testing._internal.common_utils import run_tests
-from torch_tensorrt._utils import is_thor

 from .harness import DispatchTestCase


-@unittest.skipIf(
-    is_thor(),
-    "Skipped on Thor",
-)
 class TestSymSizeConverter(DispatchTestCase):
     @parameterized.expand(
         [
diff --git a/tests/py/dynamo/models/test_export_kwargs_serde.py b/tests/py/dynamo/models/test_export_kwargs_serde.py
index dabbad3cc8..44a2e0d6c7 100644
--- a/tests/py/dynamo/models/test_export_kwargs_serde.py
+++ b/tests/py/dynamo/models/test_export_kwargs_serde.py
@@ -1,6 +1,5 @@
 # type: ignore
 import os
-import tempfile
 import unittest

 import pytest
@@ -22,7 +21,7 @@

 @pytest.mark.unit
 @pytest.mark.critical
-def test_custom_model():
+def test_custom_model(tmpdir):
     class net(nn.Module):
         def __init__(self):
             super().__init__()
@@ -75,7 +74,7 @@ def forward(self, x, b=5, c=None, d=None):
     )

     # Save the module
-    trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
+    trt_ep_path = os.path.join(tmpdir, "compiled.ep")
     torchtrt.save(trt_gm, trt_ep_path, retrace=False)
     # Clean up model env
     torch._dynamo.reset()
@@ -83,7 +82,7 @@ def forward(self, x, b=5, c=None, d=None):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_custom_model_with_dynamo_trace():
+def test_custom_model_with_dynamo_trace(tmpdir):
     class net(nn.Module):
         def __init__(self):
             super().__init__()
@@ -137,7 +136,7 @@ def forward(self, x, b=5, c=None, d=None):
     )

     # Save the module
-    trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
+    trt_ep_path = os.path.join(tmpdir, "compiled.ep")
     torchtrt.save(trt_gm, trt_ep_path, retrace=False)
     # Clean up model env
     torch._dynamo.reset()
@@ -145,7 +144,7 @@ def forward(self, x, b=5, c=None, d=None):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_custom_model_with_dynamo_trace_dynamic():
+def test_custom_model_with_dynamo_trace_dynamic(tmpdir):
     class net(nn.Module):
         def __init__(self):
             super().__init__()
@@ -208,7 +207,7 @@ def forward(self, x, b=5, c=None, d=None):
     )

     # Save the module
-    trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
+    trt_ep_path = os.path.join(tmpdir, "compiled.ep")
     torchtrt.save(trt_gm, trt_ep_path, retrace=False)
     # Clean up model env
     torch._dynamo.reset()
@@ -216,7 +215,7 @@ def forward(self, x, b=5, c=None, d=None):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_custom_model_with_dynamo_trace_kwarg_dynamic():
+def test_custom_model_with_dynamo_trace_kwarg_dynamic(tmpdir):
     ir = "dynamo"

     class net(nn.Module):
@@ -298,7 +297,7 @@ def forward(self, x, b=None, c=None, d=None, e=[]):
         msg=f"CustomKwargs Module TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}",
     )
     # Save the module
-    trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
+    trt_ep_path = os.path.join(tmpdir, "compiled.ep")
     torchtrt.save(trt_gm, trt_ep_path, retrace=False)
     # Clean up model env
     torch._dynamo.reset()
@@ -306,7 +305,7 @@ def forward(self, x, b=None, c=None, d=None, e=[]):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_custom_model_with_dynamo_trace_kwarg_dynamic():
+def test_custom_model_with_dynamo_trace_kwarg_dynamic(tmpdir):
     ir = "dynamo"

     class net(nn.Module):
@@ -388,7 +387,7 @@ def forward(self, x, b=None, c=None, d=None, e=[]):
         msg=f"CustomKwargs Module TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}",
     )
     # Save the module
-    trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
+    trt_ep_path = os.path.join(tmpdir, "compiled.ep")
     torchtrt.save(trt_gm, trt_ep_path, retrace=False)
     # Clean up model env
     torch._dynamo.reset()
diff --git a/tests/py/dynamo/models/test_export_serde.py b/tests/py/dynamo/models/test_export_serde.py
index c5b007e34b..3209177120 100644
--- a/tests/py/dynamo/models/test_export_serde.py
+++ b/tests/py/dynamo/models/test_export_serde.py
@@ -1,7 +1,6 @@
 import importlib
 import os
 import platform
-import tempfile
 import unittest

 import pytest
@@ -17,16 +16,15 @@
 if importlib.util.find_spec("torchvision"):
     import torchvision.models as models

-trt_ep_path = os.path.join(tempfile.gettempdir(), "trt.ep")
-

 @pytest.mark.unit
 @pytest.mark.critical
-def test_base_full_compile(ir):
+def test_base_full_compile(ir, tmpdir):
     """
     This tests export serde functionality on a base model
     which is fully TRT convertible
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")

     class MyModule(torch.nn.Module):
         def __init__(self):
@@ -76,11 +74,12 @@ def forward(self, x):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_base_full_compile_multiple_outputs(ir):
+def test_base_full_compile_multiple_outputs(ir, tmpdir):
     """
     This tests export serde functionality on a base model
     with multiple outputs which is fully TRT convertible
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")

     class MyModule(torch.nn.Module):
         def __init__(self):
@@ -136,11 +135,12 @@ def forward(self, x):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_no_compile(ir):
+def test_no_compile(ir, tmpdir):
     """
     This tests export serde functionality on a model
     which won't convert to TRT because of min_block_size=5 constraint
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")

     class MyModule(torch.nn.Module):
         def __init__(self):
@@ -195,13 +195,15 @@ def forward(self, x):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_hybrid_relu_fallback(ir):
+def test_hybrid_relu_fallback(ir, tmpdir):
     """
     This tests export save and load functionality on a hybrid
     model with Pytorch and TRT segments. Relu (unweighted) layer is forced to
     fallback
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class MyModule(torch.nn.Module):
         def __init__(self):
             super().__init__()
@@ -258,10 +260,13 @@ def forward(self, x):
     not importlib.util.find_spec("torchvision"),
     "torchvision is not installed",
 )
-def test_resnet18(ir):
+def test_resnet18(ir, tmpdir):
     """
     This tests export save and load functionality on Resnet18 model
     """
+
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     model = models.resnet18().eval().cuda()
     input = torch.randn((1, 3, 224, 224)).to("cuda")
@@ -303,10 +308,12 @@ def test_resnet18(ir):
     not importlib.util.find_spec("torchvision"),
     "torchvision is not installed",
 )
-def test_resnet18_cpu_offload(ir):
+def test_resnet18_cpu_offload(ir, tmpdir):
     """
     This tests export save and load functionality on Resnet18 model
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     model = models.resnet18().eval().cuda()
     input = torch.randn((1, 3, 224, 224)).to("cuda")
@@ -355,10 +362,13 @@ def test_resnet18_cpu_offload(ir):
     not importlib.util.find_spec("torchvision"),
     "torchvision is not installed",
 )
-def test_resnet18_dynamic(ir):
+def test_resnet18_dynamic(ir, tmpdir):
     """
     This tests export save and load functionality on Resnet18 model
     """
+
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     model = models.resnet18().eval().cuda()
     input = torch.randn((1, 3, 224, 224)).to("cuda")
@@ -395,10 +405,13 @@
 @unittest.skipIf(
     not importlib.util.find_spec("torchvision"), "torchvision not installed"
 )
-def test_resnet18_torch_exec_ops_serde(ir):
+def test_resnet18_torch_exec_ops_serde(ir, tmpdir):
     """
     This tests export save and load functionality on Resnet18 model
     """
+
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     model = models.resnet18().eval().cuda()
     input = torch.randn((1, 3, 224, 224)).to("cuda")
@@ -426,12 +439,14 @@ def test_resnet18_torch_exec_ops_serde(ir):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_hybrid_conv_fallback(ir):
+def test_hybrid_conv_fallback(ir, tmpdir):
     """
     This tests export save and load functionality on a hybrid
     model where a conv (a weighted layer) has been forced to fallback to Pytorch.
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class MyModule(torch.nn.Module):
         def __init__(self):
             super().__init__()
@@ -487,12 +502,14 @@ def forward(self, x):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_hybrid_conv_fallback_cpu_offload(ir):
+def test_hybrid_conv_fallback_cpu_offload(ir, tmpdir):
     """
     This tests export save and load functionality on a hybrid
     model where a conv (a weighted layer) has been forced to fallback to Pytorch.
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class MyModule(torch.nn.Module):
         def __init__(self):
             super().__init__()
@@ -549,13 +566,15 @@ def forward(self, x):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_arange_export(ir):
+def test_arange_export(ir, tmpdir):
     """
     This tests export save and load functionality on a arange static graph
     Here the arange output is a static constant (which is registered as input to the graph)
     in the exporter.
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class MyModule(torch.nn.Module):
         def __init__(self):
             super().__init__()
@@ -607,7 +626,7 @@ def forward(self, x):

 @pytest.mark.unit
-def test_save_load_ts(ir):
+def test_save_load_ts(ir, tmpdir):
     """
     This tests save/load API on Torchscript format
     (model still compiled using dynamo workflow)
     """
@@ -624,6 +643,7 @@ def forward(self, x):
             mul = relu * 0.5
             return mul

+    ts_path = os.path.join(tmpdir, "trt.ts")
     model = MyModule().eval().cuda()
     input = torch.randn((1, 3, 224, 224)).to("cuda")
@@ -641,9 +661,9 @@ def forward(self, x):
     )
     outputs_trt = trt_gm(input)
     # Save it as torchscript representation
-    torchtrt.save(trt_gm, "./trt.ts", output_format="torchscript", inputs=[input])
+    torchtrt.save(trt_gm, ts_path, output_format="torchscript", inputs=[input])

-    trt_ts_module = torchtrt.load("./trt.ts")
+    trt_ts_module = torchtrt.load(ts_path)
     outputs_trt_deser = trt_ts_module(input)

     cos_sim = cosine_similarity(outputs_trt, outputs_trt_deser)
diff --git a/tests/py/dynamo/models/test_model_refit.py b/tests/py/dynamo/models/test_model_refit.py
index e6b7f6e2a4..813f2f848d 100644
--- a/tests/py/dynamo/models/test_model_refit.py
+++ b/tests/py/dynamo/models/test_model_refit.py
@@ -1,6 +1,5 @@
 import importlib
 import os
-import tempfile
 import unittest

 import pytest
@@ -531,8 +530,9 @@ def test_refit_one_engine_bert_with_weightmap():
     "Refit feature is not supported in Python 3.13 or higher",
 )
 @pytest.mark.unit
-def test_refit_one_engine_inline_runtime_with_weightmap():
-    trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
+def test_refit_one_engine_inline_runtime_with_weightmap(tmpdir):
+
+    trt_ep_path = os.path.join(tmpdir, "compiled.ep")
     model = models.resnet18(pretrained=False).eval().to("cuda")
     model2 = models.resnet18(pretrained=True).eval().to("cuda")
     inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
@@ -888,8 +888,8 @@ def test_refit_one_engine_bert_without_weightmap():
     "Refit feature is not supported in Python 3.13 or higher",
 )
 @pytest.mark.unit
-def test_refit_one_engine_inline_runtime_without_weightmap():
-    trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
+def test_refit_one_engine_inline_runtime_without_weightmap(tmpdir):
+    trt_ep_path = os.path.join(tmpdir, "compiled.ep")
     model = models.resnet18(pretrained=True).eval().to("cuda")
     model2 = models.resnet18(pretrained=False).eval().to("cuda")
     inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
diff --git a/tests/py/dynamo/models/test_reexport.py b/tests/py/dynamo/models/test_reexport.py
index 7c414def52..9636c9d91a 100644
--- a/tests/py/dynamo/models/test_reexport.py
+++ b/tests/py/dynamo/models/test_reexport.py
@@ -1,6 +1,5 @@
 import importlib
 import os
-import tempfile
 import unittest

 import pytest
@@ -14,12 +13,10 @@
 if importlib.util.find_spec("torchvision"):
     import torchvision.models as models

-trt_ep_path = os.path.join(tempfile.gettempdir(), "trt.ep")
-

 @pytest.mark.unit
 @pytest.mark.critical
-def test_base_full_compile(ir):
+def test_base_full_compile(ir, tmpdir):
     """
     This tests export serde functionality on a base model
     which is fully TRT convertible
@@ -56,6 +53,8 @@ def forward(self, x):
     # Reexport
     trt_exp_program = torch.export.export(trt_module, (input,), strict=False)

+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     torch.export.save(trt_exp_program, trt_ep_path)
     deser_trt_module = torchtrt.load(trt_ep_path).module()
@@ -76,12 +75,14 @@ def forward(self, x):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_base_full_compile_multiple_outputs(ir):
+def test_base_full_compile_multiple_outputs(ir, tmpdir):
     """
     This tests export serde functionality on a base model
     with multiple outputs which is fully TRT convertible
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class MyModule(torch.nn.Module):
         def __init__(self):
             super().__init__()
@@ -139,7 +140,7 @@ def forward(self, x):

 @pytest.mark.unit
 @pytest.mark.critical
-def test_no_compile(ir):
+def test_no_compile(ir, tmpdir):
     """
     This tests export serde functionality on a model
     which won't convert to TRT because of min_block_size=5 constraint
@@ -157,6 +158,7 @@ def forward(self, x):
             relu = self.relu(conv)
             return conv, relu

+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
     model = MyModule().eval().cuda()
     input = torch.randn((1, 3, 224, 224)).to("cuda")
@@ -200,13 +202,15 @@ def forward(self, x):

 @pytest.mark.unit
-def test_hybrid_relu_fallback(ir):
+def test_hybrid_relu_fallback(ir, tmpdir):
     """
     This tests export save and load functionality on a hybrid
     model with Pytorch and TRT segments. Relu (unweighted) layer is forced to
     fallback
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class MyModule(torch.nn.Module):
         def __init__(self):
             super().__init__()
@@ -266,10 +270,13 @@ def forward(self, x):
     not importlib.util.find_spec("torchvision"),
     "torchvision is not installed",
 )
-def test_resnet18(ir):
+def test_resnet18(ir, tmpdir):
     """
     This tests export save and load functionality on Resnet18 model
     """
+
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     model = models.resnet18().eval().cuda()
     input = torch.randn((1, 3, 224, 224)).to("cuda")
@@ -310,12 +317,14 @@ def test_resnet18(ir):

 @pytest.mark.unit
-def test_hybrid_conv_fallback(ir):
+def test_hybrid_conv_fallback(ir, tmpdir):
     """
     This tests export save and load functionality on a hybrid
     model where a conv (a weighted layer) has been forced to fallback to Pytorch.
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class MyModule(torch.nn.Module):
         def __init__(self):
             super().__init__()
@@ -372,13 +381,15 @@ def forward(self, x):

 @pytest.mark.unit
-def test_arange_export(ir):
+def test_arange_export(ir, tmpdir):
     """
     This tests export save and load functionality on a arange static graph
     Here the arange output is a static constant (which is registered as input to the graph)
     in the exporter.
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class MyModule(torch.nn.Module):
         def __init__(self):
             super().__init__()
@@ -436,10 +447,12 @@ def forward(self, x):
     not importlib.util.find_spec("torchvision"),
     "torchvision is not installed",
 )
-def test_resnet18_dynamic(ir):
+def test_resnet18_dynamic(ir, tmpdir):
     """
     This tests export save and load functionality on Resnet18 model with dynamic shapes
     """
+
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
     model = models.resnet18().eval().cuda()
     input_bs2 = torch.randn((2, 3, 224, 224)).to("cuda")
@@ -510,10 +523,12 @@ def test_resnet18_dynamic(ir):
     not importlib.util.find_spec("torchvision"),
     "torchvision is not installed",
 )
-def test_resnet18_dynamic_fallback(ir):
+def test_resnet18_dynamic_fallback(ir, tmpdir):
     """
     This tests export save and load functionality on Resnet18 model with dynamic shapes and fallback
     """
+
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
     model = models.resnet18().eval().cuda()
     input_bs2 = torch.randn((2, 3, 224, 224)).to("cuda")
@@ -584,11 +599,13 @@ def test_resnet18_dynamic_fallback(ir):

 @pytest.mark.unit
-def test_bitwise_and_dynamic_fallback(ir):
+def test_bitwise_and_dynamic_fallback(ir, tmpdir):
     """
     This tests export save and load functionality on a bitwise_and model with dynamic shapes and fallback
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class bitwise_and(torch.nn.Module):
         def forward(self, lhs_val, rhs_val):
             return torch.ops.aten.bitwise_and.Tensor(lhs_val, rhs_val)
@@ -664,11 +681,13 @@ def forward(self, lhs_val, rhs_val):

 @pytest.mark.unit
-def test_random_dynamic_fallback(ir):
+def test_random_dynamic_fallback(ir, tmpdir):
     """
     This tests export save and load functionality on a random model with dynamic shapes and fallback
     """
+    trt_ep_path = os.path.join(tmpdir, "trt.ep")
+
     class NeuralNetwork(nn.Module):
         def __init__(self):
             super(NeuralNetwork, self).__init__()
diff --git a/tests/py/dynamo/runtime/test_004_weight_streaming.py b/tests/py/dynamo/runtime/test_004_weight_streaming.py
index ac6b43730d..fe0ae649bc 100644
--- a/tests/py/dynamo/runtime/test_004_weight_streaming.py
+++ b/tests/py/dynamo/runtime/test_004_weight_streaming.py
@@ -6,7 +6,6 @@
 import torch_tensorrt as torchtrt
 from parameterized import parameterized
 from torch.testing._internal.common_utils import TestCase, run_tests
-from torch_tensorrt._utils import is_tegra_platform
 from torch_tensorrt.dynamo.utils import prepare_inputs

 INPUT_SIZE = (64, 100)
@@ -32,10 +31,6 @@ def forward(self, x):
         return out


-@unittest.skipIf(
-    is_tegra_platform(),
-    "Skipped on Tegra platforms",
-)
 class TestWeightStreamingPython(TestCase):
     @parameterized.expand(
         [
diff --git a/tests/py/dynamo/runtime/test_output_allocator.py b/tests/py/dynamo/runtime/test_output_allocator.py
index ce00135c99..c94020705c 100644
--- a/tests/py/dynamo/runtime/test_output_allocator.py
+++ b/tests/py/dynamo/runtime/test_output_allocator.py
@@ -5,7 +5,6 @@
 import torch_tensorrt
 from parameterized import parameterized
 from torch.testing._internal.common_utils import TestCase, run_tests
-from torch_tensorrt._utils import is_tegra_platform, is_thor

 from ..testing_utilities import DECIMALS_OF_AGREEMENT

@@ -45,7 +44,7 @@ def forward(self, input):


 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
     "TensorRT RTX does not support nonzero which are required for this test",
 )
 class TestOutputAllocatorStaticModel(TestCase):
@@ -158,7 +157,7 @@ def test_combination_of_cg_and_oa(self, _, use_python_runtime):


 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
     "TensorRT RTX does not support nonzero which are required for this test",
 )
 class TestOutputAllocatorDDSModel(TestCase):
@@ -268,7 +267,7 @@ def test_combination_of_cg_and_oa(self, _, use_python_runtime):


 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
     "TensorRT RTX does not support nonzero which are required for this test",
 )
 class TestOutputAllocatorDDSOpWithReductionOpModel(TestCase):
@@ -382,7 +381,7 @@ def test_combination_of_cg_and_oa(self, _, use_python_runtime):


 @unittest.skipIf(
-    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
+    torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx,
     "TensorRT RTX does not support nonzero which are required for this test",
 )
 class TestOutputAllocatorDDSModelWithGraphBreak(TestCase):
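Note on the tmpdir pattern the test changes above move to: when a test function declares a tmpdir parameter, pytest injects a per-test temporary directory, so each test writes its serialized engine to an isolated, automatically managed path instead of a shared file under tempfile.gettempdir(). A minimal standalone sketch of the pattern follows; the toy Linear model, input shape, and tolerance are illustrative assumptions rather than code from this patch, while the save/load calls mirror the ones used in the tests above.

import os

import pytest
import torch
import torch_tensorrt as torchtrt


@pytest.mark.unit
def test_save_load_roundtrip(tmpdir):
    # pytest supplies a unique temporary directory per test invocation,
    # so repeated or parallel runs never collide on a shared file path.
    trt_ep_path = os.path.join(tmpdir, "trt.ep")

    model = torch.nn.Linear(8, 8).eval().cuda()  # illustrative toy model
    inputs = [torch.randn((4, 8)).cuda()]

    # Compile with the dynamo frontend, serialize, then reload and compare.
    trt_gm = torchtrt.compile(model, ir="dynamo", inputs=inputs)
    torchtrt.save(trt_gm, trt_ep_path, retrace=False)

    deser_trt_module = torchtrt.load(trt_ep_path).module()
    torch.testing.assert_close(
        trt_gm(*inputs), deser_trt_module(*inputs), rtol=1e-3, atol=1e-3
    )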