 from helion._testing import RefEagerTestDisabled
 from helion._testing import TestCase
 from helion._testing import import_path
+from helion._testing import skipIfCpu
 from helion._testing import skipIfRocm
 from helion.autotuner import DifferentialEvolutionSearch
 from helion.autotuner import PatternSearch
@@ -316,6 +317,7 @@ def add(a, b):
         )
         torch.testing.assert_close(add(*args), sum(args))

+    @skipIfCpu("fails on Triton CPU backend")
     def test_run_finite_search(self):
         @helion.kernel(
             configs=[
@@ -347,6 +349,7 @@ def add(a, b):
         torch.testing.assert_close(add(*args), sum(args))

     @skipIfRocm("too slow on rocm")
+    @skipIfCpu("TritonError: Error from Triton code")
     def test_random_search(self):
         args = (
             torch.randn([512, 512], device=DEVICE),
@@ -436,6 +439,7 @@ def diff_count(flat):
         ]
         self.assertEqual(sorted(pair_neighbors), sorted(expected))

+    @skipIfCpu("fails on Triton CPU backend")
     def test_accuracy_check_filters_bad_config_wrong_output(self) -> None:
         bad_config = helion.Config(block_sizes=[1], num_warps=8)
         good_config = helion.Config(block_sizes=[1], num_warps=4)
@@ -509,6 +513,7 @@ def make_bad_config_produce_wrong_output(
         run_mode("fork", expect_error=False)
         run_mode("spawn", expect_error=True)

+    @skipIfCpu("fails on Triton CPU backend")
     def test_accuracy_check_filters_bad_config_wrong_arg_mutation(self) -> None:
         bad_config = helion.Config(block_sizes=[1], num_warps=8)
         good_config = helion.Config(block_sizes=[1], num_warps=4)
@@ -591,6 +596,7 @@ def wrong_fn(*fn_args, **fn_kwargs):
         run_mode("fork", expect_error=False)
         run_mode("spawn", expect_error=True)

+    @skipIfCpu("fails on Triton CPU backend")
     def test_autotune_baseline_fn(self) -> None:
         """Test that custom baseline function is used for accuracy checking."""
         config1 = helion.Config(block_sizes=[32], num_warps=4)
@@ -631,6 +637,7 @@ def add(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
         # Verify the result is correct
         torch.testing.assert_close(result, args[0] + args[1])

+    @skipIfCpu("fails on Triton CPU backend")
     def test_autotune_baseline_fn_filters_bad_config(self) -> None:
         """Test that custom baseline function correctly filters incorrect configs."""
         bad_config = helion.Config(block_sizes=[1], num_warps=8)
@@ -729,6 +736,7 @@ def add(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
         ):
             add(*args)

+    @skipIfCpu("fails on Triton CPU backend")
     def test_max_generations(self):
         """Autotuner max generation respects explicit kwargs then setting override."""

@@ -772,6 +780,7 @@ def add(a, b):
         result = add(*args)
         torch.testing.assert_close(result, sum(args))

+    @skipIfCpu("fails on Triton CPU backend")
     def test_autotune_effort_quick(self):
         """Test that quick effort profile uses correct default values."""
         # Get the quick profile defaults
@@ -907,6 +916,7 @@ def add(a, b):
         return search.samples[0]

     @skipIfRocm("accuracy difference")
+    @skipIfCpu("fails on Triton CPU backend")
     def test_autotune_random_seed_from_env_var(self) -> None:
         # same env var value -> same random sample
         with patch.dict(
@@ -931,6 +941,7 @@ def test_autotune_random_seed_from_env_var(self) -> None:
         self.assertNotEqual(first, second)

     @skipIfRocm("accuracy difference")
+    @skipIfCpu("fails on Triton CPU backend")
     def test_autotune_random_seed_from_settings(self) -> None:
         # same autotune_random_seed setting -> same random sample
         first = self._autotune_and_record(autotune_random_seed=4242)
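
Note: this diff only imports and applies skipIfCpu; its implementation in helion._testing is not part of the change. A minimal sketch of how such a skip helper could look, assuming the CPU backend is signalled through a hypothetical TRITON_CPU_BACKEND environment variable (name used only for illustration; the real helper may detect the backend differently):

import os
import unittest

def skipIfCpu(reason: str):
    # Skip the decorated test when running against the Triton CPU backend.
    # The condition below is an assumption for illustration only.
    return unittest.skipIf(os.environ.get("TRITON_CPU_BACKEND") == "1", reason)

Applied as in the hunks above, e.g. @skipIfCpu("fails on Triton CPU backend") directly above the test method, the affected tests report as skipped on that backend instead of failing.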