
Commit 1e19be7

[DOC] Fix documentation pipeline (#6702)
1. Change the default build directory from `python/` to the root of the Triton repository.
2. Update the `runs-on` labels for the CI runners.
3. Increase the relative tolerance of the 08-grouped-gemm tutorial, which fails on some GPUs that use MMAv2.

Tested: https://github.com/triton-lang/triton/actions/runs/14816446953
1 parent 553d01d commit 1e19be7

File tree

3 files changed: +5 -5 lines

.github/workflows/documentation.yml
docs/conf.py
python/tutorials/08-grouped-gemm.py

.github/workflows/documentation.yml

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ permissions: read-all
 
 jobs:
   Build-Documentation:
-    runs-on: [a100-runner-set]
+    runs-on: [nvidia-a100]
     timeout-minutes: 30
 
     steps:

docs/conf.py

Lines changed: 2 additions & 2 deletions
@@ -43,7 +43,7 @@ def get_cmake_dir():
     plat_name = sysconfig.get_platform()
     python_version = sysconfig.get_python_version()
     dir_name = f"cmake.{plat_name}-{sys.implementation.name}-{python_version}"
-    cmake_dir = Path("../python") / "build" / dir_name
+    cmake_dir = Path("../build") / dir_name
     return cmake_dir
 
 
@@ -100,7 +100,7 @@ def setup(app):
     app.connect("autodoc-process-signature", process_sig)
     max_jobs = os.getenv("MAX_JOBS", str(2 * os.cpu_count()))
     print(f"Installing Triton Python package using {max_jobs} threads")
-    subprocess.run("pip install -e ../python", shell=True, env=os.environ.copy())
+    subprocess.run("pip install -e ../", shell=True, env=os.environ.copy())
 
     setup_generated_mlir_docs()

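For context, `get_cmake_dir()` is what lets the Sphinx build locate the CMake build tree produced by the editable `pip install`. A minimal, self-contained sketch of the resolution logic after this change (the repository layout is assumed from the diff, not verified here):

import sys
import sysconfig
from pathlib import Path

def get_cmake_dir() -> Path:
    # After this commit the build tree lives under <repo root>/build
    # rather than <repo root>/python/build.
    plat_name = sysconfig.get_platform()             # e.g. "linux-x86_64"
    python_version = sysconfig.get_python_version()  # e.g. "3.12"
    dir_name = f"cmake.{plat_name}-{sys.implementation.name}-{python_version}"
    return Path("../build") / dir_name

print(get_cmake_dir())  # e.g. ../build/cmake.linux-x86_64-cpython-3.12

On a Linux x86-64 host running CPython 3.12, this should resolve to `../build/cmake.linux-x86_64-cpython-3.12`, matching where the root-level editable install now places its CMake artifacts.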
python/tutorials/08-grouped-gemm.py

Lines changed: 2 additions & 2 deletions
@@ -392,12 +392,12 @@ def alloc_fn(size: int, alignment: int, stream: Optional[int]):
 tri_out = group_gemm_fn(group_A, group_B)
 ref_out = [torch.matmul(a, b) for a, b in zip(group_A, group_B)]
 for i in range(group_size):
-    assert torch.allclose(ref_out[i], tri_out[i], atol=1e-2, rtol=0)
+    assert torch.allclose(ref_out[i], tri_out[i], atol=1e-2, rtol=1e-2)
 
 if supports_tma():
     tri_tma_out = group_gemm_tma_fn(group_A, group_B_T)
     for i in range(group_size):
-        assert torch.allclose(ref_out[i], tri_tma_out[i], atol=1e-2, rtol=0)
+        assert torch.allclose(ref_out[i], tri_tma_out[i], atol=1e-2, rtol=1e-2)
 
 
 # only launch the kernel, no tensor preparation here to remove all overhead
