diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py
index 185223e55..1564254e8 100644
--- a/test/xpu/skip_list_common.py
+++ b/test/xpu/skip_list_common.py
@@ -658,11 +658,7 @@
     "nn/test_pooling_xpu.py": None,
     "nn/test_dropout_xpu.py": None,
     "test_dataloader_xpu.py": None,
-    "test_tensor_creation_ops_xpu.py": (
-        # CPU only (vs Numpy). CUDA skips these cases since non-deterministic results are outputed for inf and nan.
-        "test_float_to_int_conversion_finite_xpu_int8",
-        "test_float_to_int_conversion_finite_xpu_int16",
-    ),
+    "test_tensor_creation_ops_xpu.py": None,
     "test_autocast_xpu.py": None,
     "test_autograd_xpu.py": (
         # AttributeError: module 'torch.xpu' has no attribute
diff --git a/test/xpu/test_tensor_creation_ops_xpu.py b/test/xpu/test_tensor_creation_ops_xpu.py
index 077fbb6ad..21b12d784 100644
--- a/test/xpu/test_tensor_creation_ops_xpu.py
+++ b/test/xpu/test_tensor_creation_ops_xpu.py
@@ -1226,8 +1226,10 @@ def test_float_to_int_conversion_finite(self, device, dtype):
         vals = (min, -2, -1.5, -0.5, 0, 0.5, 1.5, 2, max)
         refs = None
         if self.device_type == "cuda" or self.device_type == "xpu":
-            if torch.version.hip:
+            if torch.version.hip or torch.version.xpu:
                 # HIP min float -> int64 conversion is divergent
+                # XPU min float -> int8 conversion is divergent
+                # XPU min float -> int16 conversion is divergent
                 vals = (-2, -1.5, -0.5, 0, 0.5, 1.5, 2)
             else:
                 vals = (min, -2, -1.5, -0.5, 0, 0.5, 1.5, 2)
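
Note (reviewer illustration, not part of the patch): a minimal sketch of the divergence the new comments describe, assuming a local PyTorch build. Casting a float that is far outside the target integer range is implementation-defined, so the printed values below may legitimately differ between CPU, CUDA, HIP, and XPU; that backend dependence is why the patch drops the float32 minimum from `vals` rather than skipping the whole test.

    import torch

    # Most negative finite float32, far outside the int8/int16 range,
    # so the cast result is backend-dependent.
    fmin = torch.finfo(torch.float).min

    t = torch.tensor([fmin], dtype=torch.float)
    # Per the patch comments: HIP diverges for float -> int64, and XPU
    # diverges for float -> int8 and float -> int16 on this input.
    print(t.to(torch.int8))
    print(t.to(torch.int16))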