@@ -7,6 +7,7 @@
 import importlib.util
 import inspect
 import platform
+import sys
 from pathlib import Path
 from typing import Any, Callable
 
@@ -60,6 +61,9 @@
 npu_device_count = torch.npu.device_count()
 
 
+@pytest.mark.skipif(
+    sys.version_info > (3, 14), reason="torch.compile is not supported on python 3.14+"
+)
 def test_vmap_compile():
     # Since we monkey patch vmap we need to make sure compile is happy with it
     def func(x, y):
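A side note on the version guard above: `sys.version_info > (3, 14)` does skip on Python 3.14.0 itself, because `sys.version_info` is a five-field tuple and lexicographic comparison treats a longer tuple with a matching prefix as greater. A minimal standalone sketch, separate from the diff:

    import sys

    # sys.version_info looks like (3, 14, 0, "final", 0). Once the (major, minor)
    # prefix matches, the longer tuple compares as greater, so 3.14.0 > (3, 14).
    assert (3, 14, 0, "final", 0) > (3, 14)
    assert not ((3, 13, 9, "final", 0) > (3, 14))

    # This is the condition the new markers rely on:
    skip_on_314_plus = sys.version_info > (3, 14)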
@@ -76,6 +80,9 @@ def func(x, y):
 @pytest.mark.skipif(
     TORCH_VERSION < version.parse("2.4.0"), reason="requires torch>=2.4"
 )
+@pytest.mark.skipif(
+    sys.version_info > (3, 14), reason="torch.compile is not supported on python 3.14+"
+)
 @pytest.mark.parametrize("mode", [None, "reduce-overhead"])
 class TestTD:
     def test_tensor_output(self, mode):
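Since the same stacking pattern recurs throughout this diff, a brief illustration of how class-level marks compose: every test method in the class inherits both skipif conditions and is collected once per parametrized mode. A hypothetical standalone example:

    import sys

    import pytest

    # Both marks apply to every test in the class: each test runs once per
    # "mode" value and is skipped outright when either skipif condition holds.
    @pytest.mark.skipif(sys.version_info > (3, 14), reason="needs Python < 3.15")
    @pytest.mark.parametrize("mode", [None, "reduce-overhead"])
    class TestStackedMarks:
        def test_mode_is_forwarded(self, mode):
            assert mode in (None, "reduce-overhead")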
@@ -271,7 +278,7 @@ def make_td_with_names(data):
         make_td_with_names_c(data_dict)
 
     @pytest.mark.skipif(
-        not torch.cuda.is_available() and not is_npu_available(), reason="cuda or npu required to test device casting"
+        not torch.cuda.is_available(), reason="cuda required to test device casting"
     )
     @pytest.mark.parametrize("has_device", [True, False])
     def test_to(self, has_device, mode):
@@ -292,6 +299,9 @@ def test_to_device(td):
         assert td_device_c.batch_size == td.batch_size
         assert td_device_c.device == torch.device(device)
 
+    @pytest.mark.skipif(
+        is_npu_available(), reason="torch.device in torch.compile is not supported on NPU currently."
+    )
     def test_lock(self, mode):
         def locked_op(td):
             # Adding stuff uses cache, check that this doesn't break
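The `is_npu_available()` helper is imported elsewhere in the file and its implementation is not shown in this diff; a plausible sketch of such a check, assuming the Ascend backend mirrors the CUDA API the way `torch_npu` does:

    import torch

    def is_npu_available() -> bool:
        # Hypothetical reconstruction; the real helper lives elsewhere in the
        # repo. torch only gains a ``torch.npu`` namespace once the Ascend
        # backend (torch_npu) is installed, so probe for it defensively.
        npu = getattr(torch, "npu", None)
        return npu is not None and npu.is_available()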
@@ -366,6 +376,9 @@ class MyClass:
 @pytest.mark.skipif(
     TORCH_VERSION < version.parse("2.4.0"), reason="requires torch>=2.4"
 )
+@pytest.mark.skipif(
+    sys.version_info > (3, 14), reason="torch.compile is not supported on python 3.14+"
+)
 @pytest.mark.parametrize("mode", [None, "reduce-overhead"])
 class TestTC:
     def test_tc_tensor_output(self, mode):
@@ -558,7 +571,7 @@ def clone(td: TensorDict):
         assert clone_c(data).a.b is data.a.b
 
     @pytest.mark.skipif(
-        not torch.cuda.is_available() and not is_npu_available(), reason="cuda or npu required to test device casting"
+        not torch.cuda.is_available(), reason="cuda required to test device casting"
     )
     @pytest.mark.parametrize("has_device", [True, False])
     def test_tc_to(self, has_device, mode):
@@ -579,6 +592,9 @@ def test_to_device(tc):
         assert tc_device_c.batch_size == data.batch_size
         assert tc_device_c.device == torch.device(device)
 
+    @pytest.mark.skipif(
+        is_npu_available(), reason="torch.device in torch.compile is not supported on NPU currently."
+    )
     def test_tc_lock(self, mode):
         def locked_op(tc):
             # Adding stuff uses cache, check that this doesn't break
@@ -630,6 +646,9 @@ def func_c_mytd():
 @pytest.mark.skipif(
     TORCH_VERSION < version.parse("2.4.0"), reason="requires torch>=2.4"
 )
+@pytest.mark.skipif(
+    sys.version_info > (3, 14), reason="torch.compile is not supported on python 3.14+"
+)
 @pytest.mark.parametrize("mode", [None, "reduce-overhead"])
 class TestNN:
     def test_func(self, mode):
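For reference, the `mode` values parametrized on these classes appear to correspond to `torch.compile`'s `mode` argument: `None` for the default, `"reduce-overhead"` to enable CUDA-graph-based dispatch where possible. A minimal standalone illustration:

    import torch

    def add_one(x):
        return x + 1

    # The two settings exercised by the tests above: default compilation vs
    # the lower-dispatch-overhead variant. Both should be numerically identical.
    default_c = torch.compile(add_one, mode=None)
    reduced_c = torch.compile(add_one, mode="reduce-overhead")

    x = torch.ones(3)
    assert torch.equal(default_c(x), reduced_c(x))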
@@ -734,6 +753,9 @@ def test_prob_module_with_kwargs(self, mode):
 @pytest.mark.skipif(
     TORCH_VERSION <= version.parse("2.4.0"), reason="requires torch>2.4"
 )
+@pytest.mark.skipif(
+    sys.version_info > (3, 14), reason="torch.compile is not supported on python 3.14+"
+)
 @pytest.mark.parametrize("mode", [None, "reduce-overhead"])
 class TestFunctional:
     def test_functional_error(self, mode):
@@ -1032,6 +1054,9 @@ def to_numpy(tensor):
     (TORCH_VERSION <= version.parse("2.7.0")) and _IS_OSX,
     reason="requires torch>=2.7 on OSX",
 )
+@pytest.mark.skipif(
+    sys.version_info > (3, 14), reason="torch.compile is not supported on python 3.14+"
+)
 @pytest.mark.parametrize("compiled", [False, True])
 class TestCudaGraphs:
     @pytest.fixture(scope="class", autouse=True)
@@ -1251,7 +1276,7 @@ def test_state_dict(self, compiled):
         torch.testing.assert_close(y1, y2)
 
 
-@pytest.mark.skipif(not torch.cuda.is_available() and not is_npu_available(), reason="cuda or npu is not available")
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda is not available")
 class TestCompileNontensor:
     # Same issue with the decorator @tensorclass version
     @pytest.fixture(scope="class")
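Both `TestCudaGraphs` and `TestCompileNontensor` lean on class-scoped fixtures, which pytest builds once per class rather than once per test; that keeps expensive setup (compilation, graph capture) out of each parametrized case. A small hypothetical example of the pattern:

    import pytest

    class TestSharedSetup:
        # Built once for the whole class and reused by every test method and
        # every parametrized variant, instead of once per test.
        @pytest.fixture(scope="class")
        def data(self):
            return list(range(4))

        def test_first_element(self, data):
            assert data[0] == 0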