We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 54113c2 · commit 9f8e241 — Copy full SHA for 9f8e241
tests/kernels/moe/test_pplx_moe.py
@@ -585,14 +585,14 @@ def _pplx_moe(
585
topk_weight, topk_ids, _ = fused_topk(a, score, topk, False)
586
torch_output = torch_moe2(a, w1, w2, topk_weight, topk_ids)
587
pplx_output = pplx_moe(pgi, dp_size, a, w1, w2, topk_weight, topk_ids)
588
- batched_output = _batched_moe(pgi, dp_size, a, w1, w2, topk_weight,
589
- topk_ids)
+ #batched_output = _batched_moe(pgi, dp_size, a, w1, w2, topk_weight,
+ # topk_ids)
590
591
torch_output = chunk_by_rank(torch_output, pgi.rank,
592
pgi.world_size).to(pplx_output.device)
593
594
torch.testing.assert_close(pplx_output, torch_output, atol=2e-2, rtol=0)
595
- torch.testing.assert_close(batched_output, torch_output, atol=2e-2, rtol=0)
+ #torch.testing.assert_close(batched_output, torch_output, atol=2e-2, rtol=0)
596
597
nvshmem_finalize()
598
0 commit comments