@@ -266,14 +266,14 @@ def _test_op(m, n, k, split_k, do_gather, do_scatter, inner_expt_opt, do_gamma,
     expt_is_inner = (inner_expt_opt is not None)
     if expt_is_inner:
         if mode != "ragged":
-            pytest.skip("inner_expt_opt only meaningful with ragged")
+            pytest.xfail("inner_expt_opt only meaningful with ragged")
         if "mx" in act_dtype_str and inner_expt_opt != "pad_a":
-            pytest.skip("inner_expt_opt and act mx only supported with pad_a")
+            pytest.xfail("inner_expt_opt and act mx only supported with pad_a")
         if "mx" in weight_dtype_str:
             if inner_expt_opt != "pad_b":
-                pytest.skip("inner_expt_opt and weight mx only supported with pad_b")
+                pytest.xfail("inner_expt_opt and weight mx only supported with pad_b")
             if is_persistent and not hbm_swizzling:
-                pytest.skip("FIXME: Fatal Python error: Aborted")
+                pytest.xfail("FIXME: Fatal Python error: Aborted")
         if is_hip():
             if act_dtype_str == "bfloat16":
                 pytest.skip("FIXME: failed to translate module to LLVM IR")
@@ -283,7 +283,7 @@ def _test_op(m, n, k, split_k, do_gather, do_scatter, inner_expt_opt, do_gamma,
     if is_cuda() and torch.cuda.get_device_capability()[0] < 10:
         pytest.skip("transposed mxfp weight not supported with cuda capability < 10")
     if block_m == 16:
-        pytest.skip("PassManager::run failed from Triton compiler")
+        pytest.xfail("PassManager::run failed from Triton compiler")
     # TODO: should construct the test case differently rather than overriding here
     if "float8" in weight_dtype_str and is_cuda() and torch.cuda.get_device_capability()[0] < 10:
         b_transpose = True
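
Note (not part of the commit): the change above swaps pytest.skip for pytest.xfail on configurations that hit known bugs, so they are reported as expected failures rather than silently skipped. A minimal, self-contained sketch of that behavioral difference is below; the test name and the `supported` flag are hypothetical placeholders for the capability checks in the diff.

import pytest

def test_known_broken_config():
    supported = False  # hypothetical stand-in for the capability checks in the diff
    if not supported:
        # Imperative xfail: execution stops here and the test is reported as
        # "xfailed" in the summary, keeping the known failure visible.
        # pytest.skip(...) would instead report it as "skipped".
        pytest.xfail("FIXME: known compiler failure for this configuration")
    assert 1 + 1 == 2  # only reached for supported configurations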