Skip to content

Commit e3ac59c

Browse files
authored
[Gluon][Frontend] Allow Gluon globals to be accessed from @jit fns (#7099)
An exception already exists for `triton.language.*` globals.
1 parent d9fcc10 commit e3ac59c

File tree

2 files changed: +5 additions, −4 deletions

python/test/gluon/test_frontend.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
 from triton.experimental import gluon
 from triton.experimental.gluon import language as ttgl
 from triton.experimental.gluon.language.nvidia import blackwell
-from triton.experimental.gluon.language.nvidia.blackwell import mbarrier, tma
+from triton.experimental.gluon.language.nvidia.blackwell import mbarrier, tma, TensorMemoryLayout
 from triton._filecheck import filecheck_test, run_parser
 import triton.language as tl
 from triton._internal_testing import is_cuda
@@ -123,7 +123,7 @@ def test_tensor_memory(fresh_knobs):
     knobs.compilation.disable_line_info = True
 
     layout = ttgl.BlockedLayout(size_per_thread=[1, 64], threads_per_warp=[32, 1], warps_per_cta=[4, 1], order=[0, 1])
-    tmem_layout = ttgl.nvidia.blackwell.TensorMemoryLayout(block=[128, 128], unpacked=True)
+    tmem_layout = TensorMemoryLayout(block=[128, 128], unpacked=True)
     h = tensor_memory_kernel.warmup(layout, tmem_layout, num_warps=4, grid=(1, ))
     expecttest.assert_expected_inline(
         anonymize_ir(h.asm["source"]), """\
@@ -400,7 +400,7 @@ def test_tcgen05_mma(fresh_knobs):
     knobs.compilation.disable_line_info = True
 
     nvmma_layout = ttgl.NVMMASharedLayout(swizzle_byte_width=128, element_bitwidth=16, rank=2)
-    acc_layout = blackwell.TensorMemoryLayout([128, 128], unpacked=True)
+    acc_layout = TensorMemoryLayout([128, 128], unpacked=True)
 
     h = tcgen05_mma_kernel.warmup(nvmma_layout, acc_layout, grid=(1, ))
     expecttest.assert_expected_inline(
@@ -553,7 +553,7 @@ def kernel():
 
 
 @gluon.jit
 def tmem_subslice_kernel():
-    layout: ttgl.constexpr = ttgl.nvidia.blackwell.TensorMemoryLayout(block=[128, 128], unpacked=True)
+    layout: ttgl.constexpr = TensorMemoryLayout(block=[128, 128], unpacked=True)
     tmem = ttgl.nvidia.blackwell.allocate_tensor_memory(ttgl.int32, [2, 256, 256], layout)
     tmem.subslice(0)
 

python/triton/compiler/code_generator.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -382,6 +382,7 @@ def global_lookup(name: str, absent):
             getattr(val, "__triton_builtin__", False),  #
             getattr(val, "__triton_aggregate__", False),  #
             getattr(val, "__module__", "").startswith("triton.language"),  #
+            getattr(val, "__module__", "").startswith("triton.experimental.gluon.language"),  #
             isinstance(val, language.dtype),  #
             _is_namedtuple(val),
             self._is_constexpr_global(name),  #

0 commit comments