Skip to content

Commit 9ee5ad8

Browse files
syuonibobboli
authored and committed
Fix test_fused_moe_nvfp4
Refactor float16 checks for CUTEDSL backend in test. Signed-off-by: Enwei Zhu <[email protected]>
1 parent d3a6102 commit 9ee5ad8

File tree

1 file changed

+7
-2
lines changed

1 file changed

+7
-2
lines changed

tests/unittest/_torch/modules/test_fused_moe.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1353,8 +1353,13 @@ def test_fused_moe_nvfp4(dtype, moe_backend):

     if moe_backend == "TRTLLM" and dtype == torch.float16:
         pytest.skip("TRTLLM NVFP4 MoE backend does not support float16 yet")
-    if moe_backend == "CUTEDSL" and dtype == torch.float16:
-        pytest.skip("CUTEDSL NVFP4 MoE backend does not support float16 yet")
+    if moe_backend == "CUTEDSL":
+        if dtype == torch.float16:
+            pytest.skip(
+                "CUTEDSL NVFP4 MoE backend does not support float16 yet")
+        if get_sm_version() != 100:
+            pytest.skip(
+                "CUTEDSL NVFP4 MoE backend is only supported on SM 100 GPUs")

     test_all_kernels = True
     if get_sm_version() == 120:

0 commit comments

Comments (0)