diff --git a/bin/RegisterTritonDialects.h b/bin/RegisterTritonDialects.h index 48049f02682f..96caa4f5d34d 100644 --- a/bin/RegisterTritonDialects.h +++ b/bin/RegisterTritonDialects.h @@ -1,7 +1,7 @@ #pragma once #include "amd/include/Dialect/TritonAMDGPU/IR/Dialect.h" #include "amd/include/TritonAMDGPUTransforms/Passes.h" -#include "nvidia/include/Dialect/NVGPU/IR/Dialect.h" +#include "nvidia/include/Dialect/NVG/IR/Dialect.h" #include "nvidia/include/Dialect/NVWS/IR/Dialect.h" #include "proton/Dialect/include/Conversion/ProtonGPUToLLVM/Passes.h" #include "proton/Dialect/include/Conversion/ProtonGPUToLLVM/ProtonAMDGPUToLLVM/Passes.h" @@ -28,7 +28,7 @@ #include "nvidia/hopper/include/Transforms/Passes.h" #include "nvidia/include/Dialect/NVWS/Transforms/Passes.h" -#include "nvidia/include/NVGPUToLLVM/Passes.h" +#include "nvidia/include/NVGToLLVM/Passes.h" #include "nvidia/include/TritonNVIDIAGPUToLLVM/Passes.h" #include "triton/Conversion/TritonGPUToLLVM/Passes.h" #include "triton/Conversion/TritonToTritonGPU/Passes.h" @@ -83,7 +83,7 @@ inline void registerTritonDialects(mlir::DialectRegistry &registry) { mlir::triton::gpu::registerTritonGPUGlobalScratchAllocationPass(); mlir::triton::registerConvertWarpSpecializeToLLVM(); mlir::triton::registerConvertTritonGPUToLLVMPass(); - mlir::triton::registerConvertNVGPUToLLVMPass(); + mlir::triton::registerConvertNVGToLLVMPass(); mlir::triton::registerAllocateSharedMemoryNvPass(); mlir::registerLLVMDIScope(); mlir::LLVM::registerInlinerInterface(registry); @@ -123,7 +123,7 @@ inline void registerTritonDialects(mlir::DialectRegistry &registry) { // NVWS passes mlir::triton::registerNVWSTransformsPasses(); - // NVGPU transform passes + // NVG transform passes mlir::registerNVHopperTransformsPasses(); // Proton passes @@ -143,7 +143,7 @@ inline void registerTritonDialects(mlir::DialectRegistry &registry) { mlir::triton::instrument::TritonInstrumentDialect, mlir::math::MathDialect, mlir::arith::ArithDialect, mlir::scf::SCFDialect, mlir::gpu::GPUDialect, mlir::LLVM::LLVMDialect, mlir::NVVM::NVVMDialect, - mlir::triton::nvgpu::NVGPUDialect, mlir::triton::nvws::NVWSDialect, + mlir::triton::nvg::NVGDialect, mlir::triton::nvws::NVWSDialect, mlir::triton::amdgpu::TritonAMDGPUDialect, mlir::triton::proton::ProtonDialect, mlir::triton::proton::gpu::ProtonGPUDialect, mlir::ROCDL::ROCDLDialect, diff --git a/lib/Conversion/TritonInstrumentToLLVM/CMakeLists.txt b/lib/Conversion/TritonInstrumentToLLVM/CMakeLists.txt index 5a3c379304f1..342c0bfd4c4b 100644 --- a/lib/Conversion/TritonInstrumentToLLVM/CMakeLists.txt +++ b/lib/Conversion/TritonInstrumentToLLVM/CMakeLists.txt @@ -8,5 +8,5 @@ add_triton_library(TritonInstrumentToLLVM TritonGPUIR TritonInstrumentIR TritonNvidiaGPUIR - NVGPUIR + NVGIR ) diff --git a/lib/Conversion/TritonInstrumentToLLVM/InstrumentationToLLVM.cpp b/lib/Conversion/TritonInstrumentToLLVM/InstrumentationToLLVM.cpp index b09e2ebfc7f8..927513611a18 100644 --- a/lib/Conversion/TritonInstrumentToLLVM/InstrumentationToLLVM.cpp +++ b/lib/Conversion/TritonInstrumentToLLVM/InstrumentationToLLVM.cpp @@ -1,6 +1,6 @@ #include "mlir/Conversion/LLVMCommon/Pattern.h" #include "mlir/IR/ImplicitLocOpBuilder.h" -#include "third_party/nvidia/include/Dialect/NVGPU/IR/Dialect.h" +#include "third_party/nvidia/include/Dialect/NVG/IR/Dialect.h" #include "third_party/nvidia/include/TritonNVIDIAGPUToLLVM/PTXAsmFormat.h" #include "third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.h" #include "triton/Conversion/TritonGPUToLLVM/PatternTritonGPUOpToLLVM.h" @@ -313,7 +313,7 @@ 
struct BufferPointersOpConversion assert(op.getMemType() == tti::MemType::TENSOR_MEM && "Unsupported memory type"); TritonLLVMOpBuilder b(loc, rewriter); - base = rewriter.create<triton::nvgpu::TensorMemoryBaseAddress>(loc); + base = rewriter.create<triton::nvg::TensorMemoryBaseAddress>(loc); base = b.ptrtoint(i32_ty, base); } bufPointers = rewriter.create( diff --git a/python/test/unit/language/test_conversions.py b/python/test/unit/language/test_conversions.py index 2dc77c19521e..f035384325a2 100644 --- a/python/test/unit/language/test_conversions.py +++ b/python/test/unit/language/test_conversions.py @@ -334,10 +334,10 @@ def test_typeconvert_downcast(src_dtype, dst_dtype, rounding, max_repr, device): if is_cuda(): if src_dtype != 'float32' and torch.cuda.get_device_capability(0) < (9, 0): - pytest.skip("non-float32 downcast tests only supported on NVGPU with compute capability 9.0+") + pytest.skip("non-float32 downcast tests only supported on NVG with compute capability 9.0+") if dst_dtype in ('float8e5', 'float8e4nv') and rounding == 'rtne' and torch.cuda.get_device_capability(0) < (9, 0): - pytest.skip(f"{dst_dtype} downcast with RTNE rounding tests only supported on NVGPU with compute capability 9.0+") + pytest.skip(f"{dst_dtype} downcast with RTNE rounding tests only supported on NVG with compute capability 9.0+") if dst_dtype in ('float8e5b16', 'float8e4b8') and rounding == 'rtne': pytest.skip(f"{dst_dtype} downcast with RTNE rounding tests only supported on AMDGPU CDNA3") @@ -368,10 +368,10 @@ def test_typeconvert_downcast(src_dtype, dst_dtype, rounding, max_repr, device): def test_typeconvert_downcast_clamping(src_dtype, dst_dtype, mode, device, rounding="rtne"): if is_cuda(): if src_dtype != 'float32' and torch.cuda.get_device_capability(0) < (9, 0): - pytest.skip("non-float32 downcast tests only supported on NVGPU with compute capability 9.0+") + pytest.skip("non-float32 downcast tests only supported on NVG with compute capability 9.0+") if dst_dtype in ('float8e5', 'float8e4nv') and rounding == 'rtne' and torch.cuda.get_device_capability(0) < (9, 0): - pytest.skip(f"{dst_dtype} downcast with RTNE rounding tests only supported on NVGPU with compute capability 9.0+") + pytest.skip(f"{dst_dtype} downcast with RTNE rounding tests only supported on NVG with compute capability 9.0+") converter = { tl.float8e4nv: torch.float8_e4m3fn, diff --git a/python/test/unit/language/test_core.py b/python/test/unit/language/test_core.py index bf3d85417cff..2b98adb7ed3f 100644 --- a/python/test/unit/language/test_core.py +++ b/python/test/unit/language/test_core.py @@ -121,9 +121,9 @@ def check_type_supported(dtype, device): if device in ['cuda']: cc = torch.cuda.get_device_capability() if cc[0] < 8 and (dtype is tl.bfloat16 or dtype == "bfloat16" or dtype is torch.bfloat16): - pytest.skip("bfloat16 is only supported on NVGPU with cc >= 80") + pytest.skip("bfloat16 is only supported on NVG with cc >= 80") if cc[0] < 9 and dtype in {tl.float8e4nv, "float8e4nv", "float8_e4m3fn"}: - pytest.skip("float8e4nv is only supported on NVGPU with cc >= 90") + pytest.skip("float8e4nv is only supported on NVG with cc >= 90") if is_interpreter(): if dtype in [tl.bfloat16, "bfloat16", torch.bfloat16]: pytest.skip("bfloat16 is not supported in the interpreter") diff --git a/test/Conversion/atomic_ldst.mlir b/test/Conversion/atomic_ldst.mlir index 30290f536ffe..88ded953f756 100644 --- a/test/Conversion/atomic_ldst.mlir +++ b/test/Conversion/atomic_ldst.mlir @@ -1,5 +1,5 @@ -// RUN: triton-opt %s --allocate-shared-memory-nv=compute-capability=90 
--convert-triton-gpu-to-llvm=compute-capability=90 2>&1 | FileCheck %s --check-prefix=CHECK-TTG2NVGPU -// RUN: triton-opt %s --allocate-shared-memory-nv=compute-capability=90 --convert-triton-gpu-to-llvm=compute-capability=90 --convert-nv-gpu-to-llvm 2>&1 | FileCheck %s --check-prefix=CHECK-NVGPU2LLVM +// RUN: triton-opt %s --allocate-shared-memory-nv=compute-capability=90 --convert-triton-gpu-to-llvm=compute-capability=90 2>&1 | FileCheck %s --check-prefix=CHECK-TTG2NVG +// RUN: triton-opt %s --allocate-shared-memory-nv=compute-capability=90 --convert-triton-gpu-to-llvm=compute-capability=90 --convert-nv-gpu-to-llvm 2>&1 | FileCheck %s --check-prefix=CHECK-NVG2LLVM module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, "ttg.threads-per-warp" = 32 : i32} { tt.func public @kernel_r(%arg0: !tt.ptr {tt.divisibility = 16 : i32}) { %cst = arith.constant 0.000000e+00 : f32 @@ -10,18 +10,18 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, "ttg.thr %1 = arith.muli %0, %c128_i32 : i32 %2 = arith.cmpi slt, %1, %c512_i32 : i32 - // CHECK-TTG2NVGPU: nvgpu.ld_acquire acquire, gpu - // CHECK-NVGPU2LLVM: ld.global.gpu.acquire.b32 + // CHECK-TTG2NVG: nvg.ld_acquire acquire, gpu + // CHECK-NVG2LLVM: ld.global.gpu.acquire.b32 %3 = tt.atomic_rmw fadd, acquire, gpu, %arg0, %cst, %2 : (!tt.ptr, f32, i1) -> f32 tt.store %arg0, %3 : !tt.ptr - // CHECK-TTG2NVGPU: nvgpu.ld_acquire acquire, cta - // CHECK-NVGPU2LLVM: ld.global.cta.acquire.b32 + // CHECK-TTG2NVG: nvg.ld_acquire acquire, cta + // CHECK-NVG2LLVM: ld.global.cta.acquire.b32 %4 = tt.atomic_rmw fadd, acquire, cta, %arg0, %cst, %true : (!tt.ptr, f32, i1) -> f32 tt.store %arg0, %4 : !tt.ptr - // CHECK-TTG2NVGPU: nvgpu.ld_acquire acquire, sys - // CHECK-NVGPU2LLVM: ld.global.sys.acquire.b32 + // CHECK-TTG2NVG: nvg.ld_acquire acquire, sys + // CHECK-NVG2LLVM: ld.global.sys.acquire.b32 %5 = tt.atomic_rmw fadd, acquire, sys, %arg0, %cst, %2 : (!tt.ptr, f32, i1) -> f32 tt.store %arg0, %5 : !tt.ptr tt.return diff --git a/test/Conversion/nvgpu_to_llvm.mlir b/test/Conversion/nvgpu_to_llvm.mlir index c9b4804c04e1..3d8019825440 100644 --- a/test/Conversion/nvgpu_to_llvm.mlir +++ b/test/Conversion/nvgpu_to_llvm.mlir @@ -7,7 +7,7 @@ llvm.func @cluster_id() -> i32 { // CHECK: nvvm.read.ptx.sreg.cluster.ctaid.z // CHECK: nvvm.read.ptx.sreg.cluster.nctaid.x // CHECK: nvvm.read.ptx.sreg.cluster.nctaid.y - %id = nvgpu.cluster_id + %id = nvg.cluster_id llvm.return %id : i32 } @@ -39,7 +39,7 @@ llvm.func @cluster_id() -> i32 { llvm.func @wgmma(%desc: i64, %in: !struct_64xf32) { // CHECK: wgmma.mma_async.sync.aligned.m64n256k32.f32.e5m2.e5m2 %false = llvm.mlir.constant(false) : i1 -%acc0 = nvgpu.wgmma %desc, %desc, %false { +%acc0 = nvg.wgmma %desc, %desc, %false { eltTypeA = 3 : i32, eltTypeB = 3 : i32, eltTypeC = 7 : i32, @@ -52,7 +52,7 @@ llvm.func @wgmma(%desc: i64, %in: !struct_64xf32) { // CHECK: // wait for regs: $0,$1,$2,{{.*}},$127 // CHECK: wgmma.wait_group.sync.aligned 0; - %out = nvgpu.wgmma_wait_group %in {pendings = 0 : i32} : !struct_64xf32 + %out = nvg.wgmma_wait_group %in {pendings = 0 : i32} : !struct_64xf32 llvm.return } @@ -73,7 +73,7 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.shar // CHECK: llvm.inline_asm has_side_effects asm_dialect = att operand_attrs = [] "@$0 tcgen05.dealloc.cta_group::1.sync.aligned.b32 $1, 128;", "b,r" %[[PRED]], %{{.+}} : (i1, !llvm.ptr<6>) -> !llvm.void llvm.mlir.global external @global_smem() {addr_space = 3 : i32, alignment = 16 : i64} : !llvm.array<0 
x i8> llvm.func @tensor_memory_base_lowering() -> i32 attributes {nvvm.kernel = 1 : ui1, nvvm.maxntid = array} { - %263 = nvgpu.tensor_memory_base + %263 = nvg.tensor_memory_base %264 = llvm.ptrtoint %263 : !llvm.ptr<6> to i32 llvm.return %264 : i32 } @@ -95,7 +95,7 @@ llvm.func @tensor_memory_base_warpgroup() attributes {nvvm.kernel = 1 : ui1, nvv } // CHECK: partition0 partition0() num_warps(1) { - %0 = nvgpu.tensor_memory_base + %0 = nvg.tensor_memory_base // CHECK-NEXT: "use"(%arg0) "use"(%0) : (!llvm.ptr<6>) -> () ttg.warp_return @@ -115,7 +115,7 @@ llvm.func @warpid_warp_specialize() { // CHECK: [[TIDX:%.*]] = nvvm.read.ptx.sreg.tid.x // CHECK: [[ID:%.*]] = llvm.udiv [[TIDX]], [[C32]] // CHECK: [[UNIFORM:%.*]] = nvvm.shfl.sync idx {{%[0-9]+}}, [[ID]] - %0 = nvgpu.warp_id + %0 = nvg.warp_id // CHECK: "use"([[UNIFORM]]) "use"(%0) : (i32) -> () @@ -126,7 +126,7 @@ llvm.func @warpid_warp_specialize() { // CHECK: [[TIDX:%.*]] = nvvm.read.ptx.sreg.tid.x // CHECK: [[ID:%.*]] = llvm.udiv [[TIDX]], [[C32]] // CHECK: [[UNIFORM:%.*]] = nvvm.shfl.sync idx {{%[0-9]+}}, [[ID]] - %1 = nvgpu.warp_id + %1 = nvg.warp_id // CHECK: "use"([[UNIFORM]]) "use"(%1) : (i32) -> () ttg.warp_yield @@ -141,7 +141,7 @@ llvm.func @warpid_warp_specialize() { // CHECK: [[REL_TIDX:%.*]] = llvm.sub [[TIDX]], [[C192]] // CHECK: [[ID:%.*]] = llvm.udiv [[REL_TIDX]], [[C32]] // CHECK: [[UNIFORM:%.*]] = nvvm.shfl.sync idx {{%[0-9]+}}, [[ID]] - %1 = nvgpu.warp_id + %1 = nvg.warp_id // CHECK: "use"([[UNIFORM]]) "use"(%1) : (i32) -> () ttg.warp_return @@ -155,7 +155,7 @@ llvm.func @warpid_warp_specialize() { // CHECK: [[REL_TIDX:%.*]] = llvm.sub [[TIDX]], [[C128]] // CHECK: [[ID:%.*]] = llvm.udiv [[REL_TIDX]], [[C32]] // CHECK: [[UNIFORM:%.*]] = nvvm.shfl.sync idx {{%[0-9]+}}, [[ID]] - %1 = nvgpu.warp_id + %1 = nvg.warp_id // CHECK: "use"([[UNIFORM]]) "use"(%1) : (i32) -> () ttg.warp_return @@ -172,7 +172,7 @@ module attributes {"ttg.num-warps" = 1 : i32, "ttg.threads-per-warp" = 32 : i32} // CHECK-LABEL: @one_warp tt.func @one_warp() -> i32 { // CHECK-NEXT: [[C0:%.*]] = llvm.mlir.constant(0 : i32) - %0 = nvgpu.warp_id + %0 = nvg.warp_id // CHECK-NEXT: return [[C0]] tt.return %0 : i32 } @@ -192,7 +192,7 @@ tt.func @one_contextual_warp() { // CHECK: partition0 partition0() num_warps(1) { // CHECK-NEXT: [[C0:%.*]] = llvm.mlir.constant(0 : i32) - %0 = nvgpu.warp_id + %0 = nvg.warp_id // CHECK-NEXT: "use"([[C0]]) "use"(%0) : (i32) -> () ttg.warp_return diff --git a/test/Conversion/tritongpu_to_llvm_blackwell.mlir b/test/Conversion/tritongpu_to_llvm_blackwell.mlir index 3a1579b292b0..7aec87e6afb4 100644 --- a/test/Conversion/tritongpu_to_llvm_blackwell.mlir +++ b/test/Conversion/tritongpu_to_llvm_blackwell.mlir @@ -7,7 +7,7 @@ #tmem = #ttng.tensor_memory_encoding module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 8 : i32} { // CHECK-LABEL: @tc_gen5_mma - // CHECK: %[[WID:.+]] = nvgpu.warp_id + // CHECK: %[[WID:.+]] = nvg.warp_id // CHECK: %[[C0:.+]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[P0:.+]] = llvm.icmp "eq" %[[WID]], %[[C0]] : i32 // CHECK: %[[P1:.+]] = llvm.and %{{.*}}, %[[P0]] : i1 @@ -105,7 +105,7 @@ module attributes {"ttg.num-ctas" = 2 : i32, "ttg.num-warps" = 8 : i32} { #tmem = #ttng.tensor_memory_encoding module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.shared = 65544 : i32, ttg.target = "cuda:100", ttg.tensor_memory_size = 128 : i32, "ttg.threads-per-warp" = 32 : i32} { // CHECK-LABEL: @tensor_memory_ld - // CHECK: nvgpu.tensor_memory_base + // CHECK: 
nvg.tensor_memory_base // CHECK: tcgen05.st.sync.aligned.32x32b.x128.b32 // CHECK: nvvm.tcgen05.wait // CHECK: tcgen05.ld.sync.aligned.32x32b.x128.b32 @@ -154,7 +154,7 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.shar #tmem = #ttng.tensor_memory_encoding module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.shared = 65544 : i32, ttg.target = "cuda:100", ttg.tensor_memory_size = 128 : i32, "ttg.threads-per-warp" = 32 : i32} { // CHECK-LABEL: @tensor_memory_ld_m64 - // CHECK: nvgpu.tensor_memory_base + // CHECK: nvg.tensor_memory_base // CHECK: tcgen05.st.sync.aligned.32x32b.x128.b32 // CHECK: nvvm.tcgen05.wait // CHECK: tcgen05.ld.sync.aligned.32x32b.x128.b32 @@ -174,7 +174,7 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.shar #tmem = #ttng.tensor_memory_encoding module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.shared = 65544 : i32, ttg.target = "cuda:100", ttg.tensor_memory_size = 128 : i32, "ttg.threads-per-warp" = 32 : i32} { // CHECK-LABEL: @tensor_memory_unpack_f16 - // CHECK: nvgpu.tensor_memory_base + // CHECK: nvg.tensor_memory_base // CHECK: tcgen05.st.sync.aligned.32x32b.x64.unpack::16b.b32 // CHECK: nvvm.tcgen05.wait // CHECK: tcgen05.ld.sync.aligned.32x32b.x64.pack::16b.b32 @@ -197,7 +197,7 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.shar module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32} { // CHECK-LABEL: @tc_gen5_mma_block_scale // CHECK: %[[TMEM_BASE:.+]] = llvm.ptrtoint %arg2 : !llvm.ptr<3> to i32 - // CHECK: %[[WID:.+]] = nvgpu.warp_id + // CHECK: %[[WID:.+]] = nvg.warp_id // CHECK: %[[C0:.+]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[P0:.+]] = llvm.icmp "eq" %[[WID]], %[[C0]] : i32 // CHECK: %[[P1:.+]] = llvm.and %{{.*}}, %[[P0]] : i1 @@ -865,7 +865,7 @@ tt.func private @load_store_16x32bx1_broadcast(%arg0: !ttg.memdesc<16x8xi8, #tme #tmem = #ttng.tensor_memory_encoding module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32, ttg.shared = 65544 : i32, ttg.target = "cuda:100", ttg.tensor_memory_size = 128 : i32, "ttg.threads-per-warp" = 32 : i32} { // CHECK-LABEL: @tensor_memory_st - // CHECK: nvgpu.tensor_memory_base + // CHECK: nvg.tensor_memory_base // CHECK: tcgen05.st.sync.aligned.32x32b.x128.b32 // CHECK: nvvm.tcgen05.wait tt.func public @tensor_memory_st(%arg0: !tt.ptr, %arg1: !tt.ptr, %arg2: !tt.ptr) { diff --git a/test/Conversion/tritongpu_to_llvm_hopper.mlir b/test/Conversion/tritongpu_to_llvm_hopper.mlir index b8db8d449b75..1370583ba88b 100644 --- a/test/Conversion/tritongpu_to_llvm_hopper.mlir +++ b/test/Conversion/tritongpu_to_llvm_hopper.mlir @@ -7,13 +7,13 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 8 : i32} { // CHECK-LABEL: @dot_high_precision_acc tt.func @dot_high_precision_acc(%a: !ttg.memdesc<128x128xf8E5M2, #shared, #smem>, %b: !ttg.memdesc<128x256xf8E5M2, #shared1, #smem>, %c: tensor<128x256xf32, #mma>) { - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-COUNT-128: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-COUNT-128: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-COUNT-128: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-COUNT-128: llvm.fadd %m = ttng.warp_group_dot %a, %b, %c {maxNumImpreciseAcc = 32 : i32, inputPrecision = 0 : i32} : @@ -31,13 +31,13 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 8 : i32} { module attributes {"ttg.num-ctas" = 1 : i32, 
"ttg.num-warps" = 8 : i32} { // CHECK-LABEL: @dot_low_precision_acc tt.func @dot_low_precision_acc(%a: !ttg.memdesc<128x128xf8E5M2, #shared, #smem>, %b: !ttg.memdesc<128x256xf8E5M2, #shared1, #smem>, %c: tensor<128x256xf32, #mma>) { - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-NOT: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-NOT: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-NOT: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-NOT: llvm.fadd // CHECK: llvm.return %m = ttng.warp_group_dot %a, %b, %c @@ -56,13 +56,13 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 8 : i32} { module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 8 : i32} { // CHECK-LABEL: @dot_mix_precision_acc tt.func @dot_mix_precision_acc(%a: !ttg.memdesc<128x128xf8E5M2, #shared, #smem>, %b: !ttg.memdesc<128x256xf8E5M2, #shared1, #smem>, %c: tensor<128x256xf32, #mma>) { - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-NOT: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-COUNT-128: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-NOT: llvm.fadd - // CHECK: nvgpu.wgmma + // CHECK: nvg.wgmma // CHECK-COUNT-128: llvm.fadd // CHECK: llvm.return %m = ttng.warp_group_dot %a, %b, %c @@ -81,7 +81,7 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 8 : i32} { module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32} { // CHECK-LABEL: @dot_zero_acc // Generate a wgmma with 2 sources. - // CHECK: nvgpu.wgmma %{{.*}}, %{{.*}} { + // CHECK: nvg.wgmma %{{.*}}, %{{.*}} { tt.func @dot_zero_acc(%a: !ttg.memdesc<128x64xf16, #shared, #smem>, %b: !ttg.memdesc<64x64xf16, #shared1, #smem>) { %cst = arith.constant dense<0.000000e+00> : tensor<128x64xf32, #mma> %m = ttng.warp_group_dot %a, %b, %cst {inputPrecision = 0 : i32, maxNumImpreciseAcc = 0 : i32} : @@ -90,7 +90,7 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32} { } // CHECK-LABEL: @wgmma_on_subtile - // CHECK: nvgpu.wgmma %{{.*}}, %{{.*}} + // CHECK: nvg.wgmma %{{.*}}, %{{.*}} tt.func @wgmma_on_subtile(%a: tensor<128x16xf16, #ttg.dot_op<{opIdx = 0, parent = #mma, kWidth = 2}>>, %b: !ttg.memdesc<16x256xf16, #shared1, #smem, mutable, 3x64x256>){ %cst = arith.constant dense<0.000000e+00> : tensor<128x256xf32, #mma> %m = ttng.warp_group_dot %a, %b, %cst {inputPrecision = 0 : i32, isAsync = true} : tensor<128x16xf16, #ttg.dot_op<{opIdx = 0, parent = #mma, kWidth = 2}>> * !ttg.memdesc<16x256xf16, #shared1, #smem, mutable, 3x64x256> -> tensor<128x256xf32, #mma> @@ -106,8 +106,8 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32} { module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32} { // CHECK-LABEL: @dot_reg_operand_A // Generate a wgmma where the first operand is a struct. 
- // CHECK: nvgpu.wgmma {{.*}} : (!llvm.struct<(i32, i32, i32, i32)>, i64, i1) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32)> - // CHECK: nvgpu.wgmma_wait_group %{{.*}} {pendings = 0 : i32} : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32)> + // CHECK: nvg.wgmma {{.*}} : (!llvm.struct<(i32, i32, i32, i32)>, i64, i1) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32)> + // CHECK: nvg.wgmma_wait_group %{{.*}} {pendings = 0 : i32} : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32)> tt.func @dot_reg_operand_A(%a: tensor<128x64xf16, #mma>, %b: !ttg.memdesc<64x64xf16, #shared, #smem>) { %cst = arith.constant dense<0.000000e+00> : tensor<128x64xf32, #mma> %opA = ttg.convert_layout %a : tensor<128x64xf16, #mma> -> tensor<128x64xf16, #ttg.dot_op<{opIdx = 0, parent = #mma, kWidth = 2}>> @@ -126,8 +126,8 @@ module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 4 : i32} { module attributes {"ttg.num-ctas" = 1 : i32, "ttg.num-warps" = 8 : i32} { // CHECK-LABEL: @dot_reg_operand_A_fp8 // Generate a wgmma where the first operand is a struct. 
- // CHECK: nvgpu.wgmma {{.*}} : (!llvm.struct<(i32, i32, i32, i32)>, i64, i1) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32)> - // CHECK: nvgpu.wgmma_wait_group %{{.*}} {pendings = 0 : i32} + // CHECK: nvg.wgmma {{.*}} : (!llvm.struct<(i32, i32, i32, i32)>, i64, i1) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32, f32)> + // CHECK: nvg.wgmma_wait_group %{{.*}} {pendings = 0 : i32} tt.func @dot_reg_operand_A_fp8(%a: tensor<128x128xf8E5M2, #ttg.dot_op<{opIdx = 0, parent = #mma, kWidth = 4}>>, %b: !ttg.memdesc<128x256xf8E5M2, #shared, #smem>) { %cst = arith.constant dense<0.000000e+00> : tensor<128x256xf32, #mma1> %m = ttng.warp_group_dot %a, %b, %cst { maxNumImpreciseAcc = 1073741824 : i32, inputPrecision = 0 : i32 } : @@ -548,13 +548,13 @@ module attributes {"ttg.target" = "cuda:90", "ttg.num-warps" = 4 : i32} { // CHECK-LABEL: @warpgroup_dot_wait_1_input tt.func @warpgroup_dot_wait_1_input(%arg0: tensor<128xf32, #blocked>) { - // CHECK: nvgpu.wgmma_wait_group + // CHECK: nvg.wgmma_wait_group ttng.warp_group_dot_wait %arg0 {pendings = 0 : i32} : tensor<128xf32, #blocked> tt.return } tt.func @warpgroup_dot_wait_2_inputs(%arg0: tensor<128xf32, #blocked>, %arg1: tensor<128xf32, #blocked>) { - // CHECK: nvgpu.wgmma_wait_group + // CHECK: nvg.wgmma_wait_group ttng.warp_group_dot_wait %arg0, %arg1 {pendings = 0 : i32} : tensor<128xf32, #blocked>, tensor<128xf32, #blocked> tt.return } diff --git a/test/Conversion/tritoninstrument_to_llvm.mlir b/test/Conversion/tritoninstrument_to_llvm.mlir index 5071a5361368..9002df9a57cf 100644 --- a/test/Conversion/tritoninstrument_to_llvm.mlir +++ b/test/Conversion/tritoninstrument_to_llvm.mlir @@ -4,7 +4,7 @@ module attributes {"ttg.num-warps" = 4 : i32, ttg.target = "cuda:90"} { // CHECK-LABEL: @experimental_buffer_pointers_tmem -// CHECK:nvgpu.tensor_memory_base +// CHECK:nvg.tensor_memory_base tt.func private @experimental_buffer_pointers_tmem() { tti.experimental_buffer_pointers [0, 42], tensor_mem : tensor<2xi64, #blocked> tt.return diff --git a/test/Hopper/WarpSpecialization/ws_code_partition.mlir b/test/Hopper/WarpSpecialization/ws_code_partition.mlir index 64019fc399d4..c040efea9d95 100644 --- a/test/Hopper/WarpSpecialization/ws_code_partition.mlir +++ b/test/Hopper/WarpSpecialization/ws_code_partition.mlir @@ -1,4 +1,4 @@ -// RUN: triton-opt %s 
-split-input-file --nvgpu-test-ws-code-partition=num-buffers=1 | FileCheck %s +// RUN: triton-opt %s -split-input-file --nvg-test-ws-code-partition=num-buffers=1 | FileCheck %s // CHECK-LABEL: @matmul_kernel_one_consumer // CHECK: ttg.warp_specialize{{.*}}requestedRegisters = array diff --git a/test/Hopper/WarpSpecialization/ws_data_partition.mlir b/test/Hopper/WarpSpecialization/ws_data_partition.mlir index 001ce8c3c66c..a96e1d51eaa3 100644 --- a/test/Hopper/WarpSpecialization/ws_data_partition.mlir +++ b/test/Hopper/WarpSpecialization/ws_data_partition.mlir @@ -1,4 +1,4 @@ -// RUN: triton-opt %s -split-input-file --nvgpu-test-ws-data-partition=num-warp-groups=3 | FileCheck %s +// RUN: triton-opt %s -split-input-file --nvg-test-ws-data-partition=num-warp-groups=3 | FileCheck %s // CHECK-LABEL: @matmul_persistent_ws_cooperative_kernel #blocked = #ttg.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 32], warpsPerCTA = [2, 2], order = [1, 0]}> diff --git a/test/Hopper/WarpSpecialization/ws_task_id_propagation.mlir b/test/Hopper/WarpSpecialization/ws_task_id_propagation.mlir index 88c05dc86b2d..eb30e9d1f450 100644 --- a/test/Hopper/WarpSpecialization/ws_task_id_propagation.mlir +++ b/test/Hopper/WarpSpecialization/ws_task_id_propagation.mlir @@ -1,4 +1,4 @@ -// RUN: triton-opt %s -split-input-file --nvgpu-test-taskid-propagate=num-warp-groups=2 | FileCheck %s +// RUN: triton-opt %s -split-input-file --nvg-test-taskid-propagate=num-warp-groups=2 | FileCheck %s #blocked = #ttg.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 32], warpsPerCTA = [2, 2], order = [1, 0]}> #blocked1 = #ttg.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 32], warpsPerCTA = [1, 4], order = [1, 0]}> diff --git a/test/Hopper/WarpSpecialization/ws_task_partition.mlir b/test/Hopper/WarpSpecialization/ws_task_partition.mlir index 6752ce617ffb..16e8f9406446 100644 --- a/test/Hopper/WarpSpecialization/ws_task_partition.mlir +++ b/test/Hopper/WarpSpecialization/ws_task_partition.mlir @@ -1,4 +1,4 @@ -// RUN: triton-opt %s -split-input-file --nvgpu-test-ws-task-partition=num-warp-groups=3 | FileCheck %s +// RUN: triton-opt %s -split-input-file --nvg-test-ws-task-partition=num-warp-groups=3 | FileCheck %s // CHECK-LABEL: @matmul_persistent_tma_ws_cooperative_kernel // CHECK: %[[#GA:]] = tt.descriptor_load {{.*}} {async_task_id = array} diff --git a/third_party/nvidia/CMakeLists.txt b/third_party/nvidia/CMakeLists.txt index ff1ea248de6e..1d3399e1adab 100644 --- a/third_party/nvidia/CMakeLists.txt +++ b/third_party/nvidia/CMakeLists.txt @@ -3,7 +3,7 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR}/include) add_subdirectory(include) add_subdirectory(lib) if(TRITON_BUILD_PYTHON_MODULE) - add_triton_plugin(TritonNVIDIA ${CMAKE_CURRENT_SOURCE_DIR}/triton_nvidia.cc LINK_LIBS TritonNVIDIAGPUToLLVM NVGPUToLLVM) + add_triton_plugin(TritonNVIDIA ${CMAKE_CURRENT_SOURCE_DIR}/triton_nvidia.cc LINK_LIBS TritonNVIDIAGPUToLLVM NVGToLLVM) target_link_libraries(TritonNVIDIA PRIVATE Python3::Module pybind11::headers) endif() if(TRITON_BUILD_UT) diff --git a/third_party/nvidia/backend/compiler.py b/third_party/nvidia/backend/compiler.py index 38060e6b3970..2af09a91780a 100644 --- a/third_party/nvidia/backend/compiler.py +++ b/third_party/nvidia/backend/compiler.py @@ -265,13 +265,13 @@ def make_ttgir(mod, metadata, opt, capability): if capability // 10 >= 8: passes.ttgpuir.add_f32_dot_tc(pm) # TODO(Qingyi): Move PlanCTAPass to the front of CoalescePass - nvidia.passes.ttnvgpuir.add_plan_cta(pm, cluster_info) + 
nvidia.passes.ttnvgir.add_plan_cta(pm, cluster_info) passes.ttgpuir.add_remove_layout_conversions(pm) passes.ttgpuir.add_optimize_thread_locality(pm) passes.ttgpuir.add_accelerate_matmul(pm) passes.ttgpuir.add_remove_layout_conversions(pm) passes.ttgpuir.add_optimize_dot_operands(pm, capability >= 80) - nvidia.passes.ttnvgpuir.add_optimize_descriptor_encoding(pm) + nvidia.passes.ttnvgir.add_optimize_descriptor_encoding(pm) passes.ttir.add_loop_aware_cse(pm) if capability // 10 in [8, 9]: passes.ttgpuir.add_fuse_nested_loops(pm) @@ -289,7 +289,7 @@ def make_ttgir(mod, metadata, opt, capability): passes.ttir.add_triton_licm(pm) passes.ttgpuir.add_optimize_accumulator_init(pm) passes.ttgpuir.add_hoist_tmem_alloc(pm, False) - nvidia.passes.ttnvgpuir.add_promote_lhs_to_tmem(pm) + nvidia.passes.ttnvgir.add_promote_lhs_to_tmem(pm) passes.ttgpuir.add_assign_latencies(pm, opt.num_stages) passes.ttgpuir.add_schedule_loops(pm) passes.ttgpuir.add_warp_specialize(pm, opt.num_stages) @@ -298,7 +298,7 @@ def make_ttgir(mod, metadata, opt, capability): passes.ttgpuir.add_combine_tensor_select_and_if(pm) # hoist again and allow hoisting out of if statements passes.ttgpuir.add_hoist_tmem_alloc(pm, True) - nvidia.passes.ttnvgpuir.add_remove_tmem_tokens(pm) + nvidia.passes.ttnvgir.add_remove_tmem_tokens(pm) else: passes.ttir.add_triton_licm(pm) passes.common.add_canonicalizer(pm) @@ -306,17 +306,17 @@ def make_ttgir(mod, metadata, opt, capability): passes.ttgpuir.add_prefetch(pm) passes.ttgpuir.add_optimize_dot_operands(pm, capability >= 80) passes.ttgpuir.add_coalesce_async_copy(pm) - nvidia.passes.ttnvgpuir.add_optimize_tmem_layouts(pm) + nvidia.passes.ttnvgir.add_optimize_tmem_layouts(pm) if capability // 10 >= 9: - nvidia.passes.ttnvgpuir.add_tma_lowering(pm) + nvidia.passes.ttnvgir.add_tma_lowering(pm) passes.ttgpuir.add_remove_layout_conversions(pm) - nvidia.passes.ttnvgpuir.add_interleave_tmem(pm) + nvidia.passes.ttnvgir.add_interleave_tmem(pm) passes.ttgpuir.add_reduce_data_duplication(pm) passes.ttgpuir.add_reorder_instructions(pm) passes.ttir.add_loop_aware_cse(pm) passes.common.add_symbol_dce(pm) - nvidia.passes.ttnvgpuir.add_fence_insertion(pm, capability) - nvidia.passes.ttnvgpuir.add_lower_mma(pm) + nvidia.passes.ttnvgir.add_fence_insertion(pm, capability) + nvidia.passes.ttnvgir.add_lower_mma(pm) passes.common.add_sccp(pm) passes.common.add_cse(pm) passes.common.add_canonicalizer(pm) @@ -357,20 +357,20 @@ def make_llir(self, src, metadata, options, capability): passes.convert.add_scf_to_cf(pm) passes.gluon.add_inliner(pm) nvidia.passes.ttgpuir.add_allocate_shared_memory_nv(pm, capability, ptx_version) - nvidia.passes.ttnvgpuir.add_allocate_tensor_memory(pm) + nvidia.passes.ttnvgir.add_allocate_tensor_memory(pm) if knobs.compilation.instrumentation_mode == "consan": # Call ConcurrencySanitizerPass here, before allocating global scratch memory but after allocating tensor and shared passes.ttgpuir.add_concurrency_sanitizer(pm) passes.ttgpuir.add_allocate_global_scratch_memory(pm) - nvidia.passes.ttnvgpuir.add_proxy_fence_insertion(pm, capability) + nvidia.passes.ttnvgir.add_proxy_fence_insertion(pm, capability) # instrumentation point here so we can override IRs above (e.g., ttir and ttgir) if CUDABackend.instrumentation: CUDABackend.instrumentation.patch("ttgpuir_to_llvmir", pm, mod.context) nvidia.passes.ttgpuir.add_to_llvmir(pm, capability, ptx_version) passes.common.add_canonicalizer(pm) passes.common.add_cse(pm) - nvidia.passes.ttnvgpuir.add_nvgpu_to_llvm(pm) - 
nvidia.passes.ttnvgpuir.add_warp_specialize_to_llvm(pm) + nvidia.passes.ttnvgir.add_nvg_to_llvm(pm) + nvidia.passes.ttnvgir.add_warp_specialize_to_llvm(pm) passes.common.add_canonicalizer(pm) passes.common.add_cse(pm) passes.common.add_symbol_dce(pm) diff --git a/third_party/nvidia/hopper/include/Transforms/Passes.td b/third_party/nvidia/hopper/include/Transforms/Passes.td index e65aa9bb4caa..43d75baa3762 100644 --- a/third_party/nvidia/hopper/include/Transforms/Passes.td +++ b/third_party/nvidia/hopper/include/Transforms/Passes.td @@ -3,7 +3,7 @@ include "mlir/Pass/PassBase.td" -def NVGPUWarpSpecialization : Pass<"nvgpu-warp-specialization", "mlir::ModuleOp"> { +def NVGWarpSpecialization : Pass<"nvg-warp-specialization", "mlir::ModuleOp"> { let summary = "Automatic Warp specialization for NVIDIA GPU"; let description = [{ @@ -23,7 +23,7 @@ def NVGPUWarpSpecialization : Pass<"nvgpu-warp-specialization", "mlir::ModuleOp" ]; } -def NVGPUTestWSTaskPartition : Pass<"nvgpu-test-ws-task-partition", "mlir::ModuleOp"> { +def NVGTestWSTaskPartition : Pass<"nvg-test-ws-task-partition", "mlir::ModuleOp"> { let summary = "test warp specialization task partition"; let description = "This pass computes a warp schedule partition by annotating anchor operations with async task ids"; @@ -36,7 +36,7 @@ def NVGPUTestWSTaskPartition : Pass<"nvgpu-test-ws-task-partition", "mlir::Modul ]; } -def NVGPUTestWSTaskIdPropagate : Pass<"nvgpu-test-taskid-propagate", "mlir::ModuleOp"> { +def NVGTestWSTaskIdPropagate : Pass<"nvg-test-taskid-propagate", "mlir::ModuleOp"> { let summary = "test warp specialization task id propagation"; let description = [{ @@ -54,7 +54,7 @@ def NVGPUTestWSTaskIdPropagate : Pass<"nvgpu-test-taskid-propagate", "mlir::Modu ]; } -def NVGPUTestWSDataPartition : Pass<"nvgpu-test-ws-data-partition", "mlir::ModuleOp"> { +def NVGTestWSDataPartition : Pass<"nvg-test-ws-data-partition", "mlir::ModuleOp"> { let summary = "test warp specialization data partition"; let description = "This pass partitions operations into multiple suboperations which operate on smaller data shapes"; @@ -67,7 +67,7 @@ def NVGPUTestWSDataPartition : Pass<"nvgpu-test-ws-data-partition", "mlir::Modul ]; } -def NVGPUTestWSCodePartition: Pass<"nvgpu-test-ws-code-partition", "mlir::ModuleOp"> { +def NVGTestWSCodePartition: Pass<"nvg-test-ws-code-partition", "mlir::ModuleOp"> { let summary = "test warp specialization code partition"; let description = "This pass generates warp specialized code based on task id attributes."; diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization.cpp index 1f50795d7087..8395d91fc207 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization.cpp @@ -5,7 +5,7 @@ #include "triton/Dialect/TritonGPU/IR/Dialect.h" #include "triton/Dialect/TritonGPU/Transforms/PipeliningUtility.h" -#define DEBUG_TYPE "nvgpu-warp-specialization" +#define DEBUG_TYPE "nvg-warp-specialization" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") @@ -17,14 +17,14 @@ bool doDataPartition(triton::FuncOp &funcOp, unsigned numConsumerGroups); void doCodePartition(triton::FuncOp &funcOp, unsigned numBuffers); void doTokenLowering(triton::FuncOp &funcOp, unsigned numConsumerGroups); -#define GEN_PASS_DEF_NVGPUWARPSPECIALIZATION +#define GEN_PASS_DEF_NVGWARPSPECIALIZATION #include 
"nvidia/hopper/include/Transforms/Passes.h.inc" -class NVGPUWarpSpecializationPass - : public impl::NVGPUWarpSpecializationBase { +class NVGWarpSpecializationPass + : public impl::NVGWarpSpecializationBase { public: - using impl::NVGPUWarpSpecializationBase< - NVGPUWarpSpecializationPass>::NVGPUWarpSpecializationBase; + using impl::NVGWarpSpecializationBase< + NVGWarpSpecializationPass>::NVGWarpSpecializationBase; void runOnFuncOp(triton::FuncOp funcOp) { SmallVector loops; diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/CodePartitionUtility.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/CodePartitionUtility.cpp index 5403561c66b9..b2b111399920 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/CodePartitionUtility.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/CodePartitionUtility.cpp @@ -12,7 +12,7 @@ namespace ttg = mlir::triton::gpu; namespace ttng = ::mlir::triton::nvidia_gpu; namespace mlir { -#define DEBUG_TYPE "nvgpu-ws-utility" +#define DEBUG_TYPE "nvg-ws-utility" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSBuffer.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSBuffer.cpp index e0b21091059c..8fc95a20f350 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSBuffer.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSBuffer.cpp @@ -30,7 +30,7 @@ namespace ttg = mlir::triton::gpu; namespace ttng = ::mlir::triton::nvidia_gpu; namespace mlir { -#define DEBUG_TYPE "nvgpu-ws-buffer" +#define DEBUG_TYPE "nvg-ws-buffer" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSCodePartition.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSCodePartition.cpp index 20ad1ca87d89..f319895ab42d 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSCodePartition.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSCodePartition.cpp @@ -28,7 +28,7 @@ namespace ttng = ::mlir::triton::nvidia_gpu; namespace ttnvws = ::mlir::triton::nvws; namespace mlir { -#define DEBUG_TYPE "nvgpu-ws-code-partition" +#define DEBUG_TYPE "nvg-ws-code-partition" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") @@ -1315,14 +1315,14 @@ void doCodePartition(triton::FuncOp &funcOp, unsigned numBuffers) { }); } -#define GEN_PASS_DEF_NVGPUTESTWSCODEPARTITION +#define GEN_PASS_DEF_NVGTESTWSCODEPARTITION #include "nvidia/hopper/include/Transforms/Passes.h.inc" -class NVGPUTestWSCodePartitionPass - : public impl::NVGPUTestWSCodePartitionBase { +class NVGTestWSCodePartitionPass + : public impl::NVGTestWSCodePartitionBase { public: - using impl::NVGPUTestWSCodePartitionBase< - NVGPUTestWSCodePartitionPass>::NVGPUTestWSCodePartitionBase; + using impl::NVGTestWSCodePartitionBase< + NVGTestWSCodePartitionPass>::NVGTestWSCodePartitionBase; void runOnFuncOp(triton::FuncOp funcOp) { // Disable code partitioning when numBuffers is 0. 
diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSDataPartition.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSDataPartition.cpp index 60c0dda770a5..d0dda8164858 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSDataPartition.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSDataPartition.cpp @@ -13,7 +13,7 @@ namespace ttng = mlir::triton::nvidia_gpu; namespace mlir { -#define DEBUG_TYPE "nvgpu-ws-data-partition" +#define DEBUG_TYPE "nvg-ws-data-partition" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") @@ -1365,14 +1365,14 @@ bool doDataPartition(triton::FuncOp &funcOp, unsigned numConsumerGroups) { return true; } -#define GEN_PASS_DEF_NVGPUTESTWSDATAPARTITION +#define GEN_PASS_DEF_NVGTESTWSDATAPARTITION #include "nvidia/hopper/include/Transforms/Passes.h.inc" -class NVGPUTestWSDataPartitionPass - : public impl::NVGPUTestWSDataPartitionBase { +class NVGTestWSDataPartitionPass + : public impl::NVGTestWSDataPartitionBase { public: - using impl::NVGPUTestWSDataPartitionBase< - NVGPUTestWSDataPartitionPass>::NVGPUTestWSDataPartitionBase; + using impl::NVGTestWSDataPartitionBase< + NVGTestWSDataPartitionPass>::NVGTestWSDataPartitionBase; void runOnFuncOp(triton::FuncOp funcOp) { if (numWarpGroups > 2) diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSLowerMem.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSLowerMem.cpp index 37571a006609..5c4e333acfa9 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSLowerMem.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSLowerMem.cpp @@ -30,7 +30,7 @@ namespace ttg = mlir::triton::gpu; namespace ttng = ::mlir::triton::nvidia_gpu; namespace mlir { -#define DEBUG_TYPE "nvgpu-ws-lower-mem" +#define DEBUG_TYPE "nvg-ws-lower-mem" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSSpecialize.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSSpecialize.cpp index c8e1d03255ea..ab2e2d3cea13 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSSpecialize.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSSpecialize.cpp @@ -31,7 +31,7 @@ namespace ttg = mlir::triton::gpu; namespace ttng = ::mlir::triton::nvidia_gpu; namespace mlir { -#define DEBUG_TYPE "nvgpu-ws-specialize" +#define DEBUG_TYPE "nvg-ws-specialize" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSTaskIdPropagate.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSTaskIdPropagate.cpp index ff06f7eb7f76..3fd83d33aaa2 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSTaskIdPropagate.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSTaskIdPropagate.cpp @@ -12,7 +12,7 @@ #include "triton/Dialect/TritonGPU/Transforms/Utility.h" #include "triton/Dialect/TritonNvidiaGPU/IR/Dialect.h" -#define DEBUG_TYPE "nvgpu-ws-task-id-propagate" +#define DEBUG_TYPE "nvg-ws-task-id-propagate" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") @@ -63,15 +63,15 @@ int doTaskIdPropagate(triton::FuncOp &funcOp) { return 0; } -#define GEN_PASS_DEF_NVGPUTESTWSTASKIDPROPAGATE 
+#define GEN_PASS_DEF_NVGTESTWSTASKIDPROPAGATE #include "nvidia/hopper/include/Transforms/Passes.h.inc" -class NVGPUTestWSTaskIdPropagatePass - : public impl::NVGPUTestWSTaskIdPropagateBase< - NVGPUTestWSTaskIdPropagatePass> { +class NVGTestWSTaskIdPropagatePass + : public impl::NVGTestWSTaskIdPropagateBase< + NVGTestWSTaskIdPropagatePass> { public: - using impl::NVGPUTestWSTaskIdPropagateBase< - NVGPUTestWSTaskIdPropagatePass>::NVGPUTestWSTaskIdPropagateBase; + using impl::NVGTestWSTaskIdPropagateBase< + NVGTestWSTaskIdPropagatePass>::NVGTestWSTaskIdPropagateBase; void runOnFuncOp(triton::FuncOp funcOp) { llvm::DenseSet anchorOps; diff --git a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSTaskPartition.cpp b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSTaskPartition.cpp index 4952e4d346eb..00482231a7b5 100644 --- a/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSTaskPartition.cpp +++ b/third_party/nvidia/hopper/lib/Transforms/WarpSpecialization/WSTaskPartition.cpp @@ -8,7 +8,7 @@ #include "triton/Dialect/TritonGPU/Transforms/Utility.h" #include "triton/Dialect/TritonNvidiaGPU/IR/Dialect.h" -#define DEBUG_TYPE "nvgpu-ws-task-partition" +#define DEBUG_TYPE "nvg-ws-task-partition" #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ") #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n") @@ -136,14 +136,14 @@ void doTaskPartition(triton::FuncOp &funcOp, unsigned numWarpGroups) { }); } -#define GEN_PASS_DEF_NVGPUTESTWSTASKPARTITION +#define GEN_PASS_DEF_NVGTESTWSTASKPARTITION #include "nvidia/hopper/include/Transforms/Passes.h.inc" -class NVGPUTestWSTaskPartitionPass - : public impl::NVGPUTestWSTaskPartitionBase { +class NVGTestWSTaskPartitionPass + : public impl::NVGTestWSTaskPartitionBase { public: - using impl::NVGPUTestWSTaskPartitionBase< - NVGPUTestWSTaskPartitionPass>::NVGPUTestWSTaskPartitionBase; + using impl::NVGTestWSTaskPartitionBase< + NVGTestWSTaskPartitionPass>::NVGTestWSTaskPartitionBase; void runOnFuncOp(triton::FuncOp funcOp) { if (numWarpGroups > 1) diff --git a/third_party/nvidia/include/CMakeLists.txt b/third_party/nvidia/include/CMakeLists.txt index 2ef7aab10682..14275b6c827b 100644 --- a/third_party/nvidia/include/CMakeLists.txt +++ b/third_party/nvidia/include/CMakeLists.txt @@ -1,3 +1,3 @@ add_subdirectory(Dialect) add_subdirectory(TritonNVIDIAGPUToLLVM) -add_subdirectory(NVGPUToLLVM) +add_subdirectory(NVGToLLVM) diff --git a/third_party/nvidia/include/Dialect/CMakeLists.txt b/third_party/nvidia/include/Dialect/CMakeLists.txt index 221301530ed3..d04c49a4e39a 100644 --- a/third_party/nvidia/include/Dialect/CMakeLists.txt +++ b/third_party/nvidia/include/Dialect/CMakeLists.txt @@ -1,2 +1,2 @@ -add_subdirectory(NVGPU) +add_subdirectory(NVG) add_subdirectory(NVWS) diff --git a/third_party/nvidia/include/Dialect/NVGPU/IR/CMakeLists.txt b/third_party/nvidia/include/Dialect/NVGPU/IR/CMakeLists.txt index f8932cdc4b7f..f7abda184b45 100644 --- a/third_party/nvidia/include/Dialect/NVGPU/IR/CMakeLists.txt +++ b/third_party/nvidia/include/Dialect/NVGPU/IR/CMakeLists.txt @@ -1,18 +1,18 @@ set(MLIR_BINARY_DIR ${CMAKE_BINARY_DIR}) -set(LLVM_TARGET_DEFINITIONS NVGPUOps.td) -mlir_tablegen(Dialect.h.inc -gen-dialect-decls -dialect=nvgpu) -mlir_tablegen(Dialect.cpp.inc -gen-dialect-defs -dialect=nvgpu) +set(LLVM_TARGET_DEFINITIONS NVGOps.td) +mlir_tablegen(Dialect.h.inc -gen-dialect-decls -dialect=nvg) +mlir_tablegen(Dialect.cpp.inc -gen-dialect-defs -dialect=nvg) mlir_tablegen(OpsConversions.inc -gen-llvmir-conversions) mlir_tablegen(Ops.h.inc 
-gen-op-decls) mlir_tablegen(Ops.cpp.inc -gen-op-defs) mlir_tablegen(OpsEnums.h.inc -gen-enum-decls) mlir_tablegen(OpsEnums.cpp.inc -gen-enum-defs) -add_mlir_doc(NVGPUDialect NVGPUDialect dialects/ -gen-dialect-doc) -add_mlir_doc(NVGPUOps NVGPUOps dialects/ -gen-op-doc) -add_public_tablegen_target(NVGPUTableGen) +add_mlir_doc(NVGDialect NVGDialect dialects/ -gen-dialect-doc) +add_mlir_doc(NVGOps NVGOps dialects/ -gen-op-doc) +add_public_tablegen_target(NVGTableGen) -set(LLVM_TARGET_DEFINITIONS NVGPUAttrDefs.td) -mlir_tablegen(NVGPUAttrDefs.h.inc -gen-attrdef-decls) -mlir_tablegen(NVGPUAttrDefs.cpp.inc -gen-attrdef-defs) -add_public_tablegen_target(NVGPUAttrDefsIncGen) +set(LLVM_TARGET_DEFINITIONS NVGAttrDefs.td) +mlir_tablegen(NVGAttrDefs.h.inc -gen-attrdef-decls) +mlir_tablegen(NVGAttrDefs.cpp.inc -gen-attrdef-defs) +add_public_tablegen_target(NVGAttrDefsIncGen) diff --git a/third_party/nvidia/include/Dialect/NVGPU/IR/Dialect.h b/third_party/nvidia/include/Dialect/NVGPU/IR/Dialect.h index 6e238af4f2bc..aa5f68cbe252 100644 --- a/third_party/nvidia/include/Dialect/NVGPU/IR/Dialect.h +++ b/third_party/nvidia/include/Dialect/NVGPU/IR/Dialect.h @@ -21,26 +21,26 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef TRITON_DIALECT_NVGPU_IR_DIALECT_H_ -#define TRITON_DIALECT_NVGPU_IR_DIALECT_H_ +#ifndef TRITON_DIALECT_NVG_IR_DIALECT_H_ +#define TRITON_DIALECT_NVG_IR_DIALECT_H_ #include "mlir/Dialect/GPU/IR/GPUDialect.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/Dialect.h" -#include "nvidia/include/Dialect/NVGPU/IR/Dialect.h.inc" -#include "nvidia/include/Dialect/NVGPU/IR/OpsEnums.h.inc" +#include "nvidia/include/Dialect/NVG/IR/Dialect.h.inc" +#include "nvidia/include/Dialect/NVG/IR/OpsEnums.h.inc" #define GET_ATTRDEF_CLASSES -#include "nvidia/include/Dialect/NVGPU/IR/NVGPUAttrDefs.h.inc" +#include "nvidia/include/Dialect/NVG/IR/NVGAttrDefs.h.inc" #define GET_OP_CLASSES -#include "nvidia/include/Dialect/NVGPU/IR/Ops.h.inc" +#include "nvidia/include/Dialect/NVG/IR/Ops.h.inc" namespace mlir { namespace triton { -namespace nvgpu {} // namespace nvgpu +namespace nvg {} // namespace nvg } // namespace triton } // namespace mlir diff --git a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUAttrDefs.td b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUAttrDefs.td index c904824ef086..9e9248408dca 100644 --- a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUAttrDefs.td +++ b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUAttrDefs.td @@ -19,15 +19,15 @@ // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -#ifndef NVGPU_ATTRDEFS -#define NVGPU_ATTRDEFS +#ifndef NVG_ATTRDEFS +#define NVG_ATTRDEFS include "mlir/IR/AttrTypeBase.td" -include "NVGPUDialect.td" +include "NVGDialect.td" -class NVGPU_Attr traits = [], +class NVG_Attr traits = [], string baseCppClass = "::mlir::Attribute"> - : AttrDef { + : AttrDef { } #endif diff --git a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUDialect.td b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUDialect.td index 6978173d4982..827d169f8fb1 100644 --- a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUDialect.td +++ b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUDialect.td @@ -19,17 +19,17 @@ // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-#ifndef NVGPU_DIALECT -#define NVGPU_DIALECT +#ifndef NVG_DIALECT +#define NVG_DIALECT include "mlir/IR/OpBase.td" -def NVGPU_Dialect : Dialect { - let name = "nvgpu"; - let cppNamespace = "::mlir::triton::nvgpu"; +def NVG_Dialect : Dialect { + let name = "nvg"; + let cppNamespace = "::mlir::triton::nvg"; let description = [{ - NVGPU Dialect. + NVG Dialect. }]; let dependentDialects = [ diff --git a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td index 8eca21375fd1..3780460b113f 100644 --- a/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td +++ b/third_party/nvidia/include/Dialect/NVGPU/IR/NVGPUOps.td @@ -19,27 +19,27 @@ // TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE // SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -#ifndef NVGPU_OPS -#define NVGPU_OPS +#ifndef NVG_OPS +#define NVG_OPS include "mlir/IR/OpBase.td" include "mlir/IR/EnumAttr.td" include "mlir/Dialect/LLVMIR/LLVMOpBase.td" include "mlir/Interfaces/InferTypeOpInterface.td" // SameOperandsAndResultType -include "NVGPUDialect.td" -include "NVGPUAttrDefs.td" +include "NVGDialect.td" +include "NVGAttrDefs.td" def LLVM_PointerGlobal : LLVM_PointerInAddressSpace<1>; def LLVM_PointerShared : LLVM_PointerInAddressSpace<3>; def LLVM_PointerTensorMemory : LLVM_PointerInAddressSpace<6>; -def NVGPU_Float : AnyTypeOf<[F8E4M3FN, F8E4M3FNUZ, F8E5M2, F8E5M2FNUZ, F16, BF16, F32, F64], "floating-point">; -def NVGPU_Int : AnyTypeOf<[I1, I8, I16, I32, I64], "integer">; -def NVGPU_ScalarLike : AnyTypeOf<[NVGPU_Float, NVGPU_Int]>; +def NVG_Float : AnyTypeOf<[F8E4M3FN, F8E4M3FNUZ, F8E5M2, F8E5M2FNUZ, F16, BF16, F32, F64], "floating-point">; +def NVG_Int : AnyTypeOf<[I1, I8, I16, I32, I64], "integer">; +def NVG_ScalarLike : AnyTypeOf<[NVG_Float, NVG_Int]>; -def NVGPU_MemSemanticAttr : I32EnumAttr< +def NVG_MemSemanticAttr : I32EnumAttr< "MemSemantic", "", [ I32EnumAttrCase<"RELAXED", 1, "relaxed">, @@ -47,23 +47,23 @@ def NVGPU_MemSemanticAttr : I32EnumAttr< I32EnumAttrCase<"RELEASE", 3, "release">, I32EnumAttrCase<"ACQUIRE_RELEASE", 4, "acq_rel">, ]> { - let cppNamespace = "::mlir::triton::nvgpu"; + let cppNamespace = "::mlir::triton::nvg"; } -def NVGPU_MemSyncScopeAttr : I32EnumAttr< +def NVG_MemSyncScopeAttr : I32EnumAttr< "MemSyncScope", "", [ I32EnumAttrCase<"GPU", 1, "gpu">, I32EnumAttrCase<"CTA", 2, "cta">, I32EnumAttrCase<"SYSTEM", 3, "sys">, ]> { - let cppNamespace = "::mlir::triton::nvgpu"; + let cppNamespace = "::mlir::triton::nvg"; } -class NVGPU_Op traits = []> : - LLVM_OpBase; +class NVG_Op traits = []> : + LLVM_OpBase; -def NVGPU_WGMMAWaitGroupOp : NVGPU_Op<"wgmma_wait_group", [DeclareOpInterfaceMethods, +def NVG_WGMMAWaitGroupOp : NVG_Op<"wgmma_wait_group", [DeclareOpInterfaceMethods, AllTypesMatch<["input", "output"]>]> { let arguments = (ins LLVM_AnyStruct:$input, I32Attr:$pendings); let results = (outs LLVM_AnyStruct:$output); @@ -76,7 +76,7 @@ def WGMMA_LayoutAttr : I32EnumAttr<"WGMMALayout", I32EnumAttrCase<"row", 0>, I32EnumAttrCase<"col", 1> ]>{ - let cppNamespace = "::mlir::triton::nvgpu"; + let cppNamespace = "::mlir::triton::nvg"; } def WGMMA_EltTypeAttr : I32EnumAttr<"WGMMAEltType", @@ -91,12 +91,12 @@ def WGMMA_EltTypeAttr : I32EnumAttr<"WGMMAEltType", I32EnumAttrCase<"tf32", 6>, I32EnumAttrCase<"f32", 7> ]>{ - let cppNamespace = "::mlir::triton::nvgpu"; + let cppNamespace = "::mlir::triton::nvg"; } def WGMMA_OperandType : AnyTypeOf<[LLVM_AnyStruct, I64], "wgmma operand A/B type">; -def NVGPU_WGMMAOp : NVGPU_Op<"wgmma", 
[]> { +def NVG_WGMMAOp : NVG_Op<"wgmma", []> { let arguments = (ins WGMMA_OperandType:$opA, WGMMA_OperandType:$opB, I1:$useC, Optional:$opC, I32Attr:$m, I32Attr:$n, I32Attr:$k, WGMMA_EltTypeAttr:$eltTypeC, WGMMA_EltTypeAttr:$eltTypeA, WGMMA_EltTypeAttr:$eltTypeB, @@ -105,28 +105,28 @@ def NVGPU_WGMMAOp : NVGPU_Op<"wgmma", []> { let assemblyFormat = "$opA `,` $opB `,` $useC (`,` $opC^)? attr-dict `:` functional-type(operands, $res)"; } -def NVGPU_ClusterCTAIdOp : NVGPU_Op<"cluster_id", [Pure]> { +def NVG_ClusterCTAIdOp : NVG_Op<"cluster_id", [Pure]> { let results = (outs I32:$result); let assemblyFormat = "attr-dict"; } -def NVGPU_LoadAcquireOp : NVGPU_Op<"ld_acquire", [MemoryEffects<[MemRead]>]> { +def NVG_LoadAcquireOp : NVG_Op<"ld_acquire", [MemoryEffects<[MemRead]>]> { let arguments = ( ins LLVM_PointerGlobal:$addr, Optional:$mask, - NVGPU_MemSemanticAttr:$sem, - NVGPU_MemSyncScopeAttr:$scope + NVG_MemSemanticAttr:$sem, + NVG_MemSyncScopeAttr:$scope ); - let results = (outs NVGPU_ScalarLike:$result); + let results = (outs NVG_ScalarLike:$result); let assemblyFormat = "$sem `,` $scope `,` $addr (`,` $mask^)? attr-dict `:` functional-type($addr, $result)"; } -def NVGPU_WarpIdOp : NVGPU_Op<"warp_id", [Pure]> { +def NVG_WarpIdOp : NVG_Op<"warp_id", [Pure]> { let results = (outs I32:$result); let assemblyFormat = "attr-dict"; } -def NVGPU_TensorMemoryBaseAddress : NVGPU_Op<"tensor_memory_base", [Pure]> { +def NVG_TensorMemoryBaseAddress : NVG_Op<"tensor_memory_base", [Pure]> { let description = [{ Op to represent base address of tensor memory in a kernel. This is used to simplify lowering from TritonGPU to LLVM. diff --git a/third_party/nvidia/include/NVGPUToLLVM/CMakeLists.txt b/third_party/nvidia/include/NVGPUToLLVM/CMakeLists.txt index f89521768f06..d1589fea8b73 100644 --- a/third_party/nvidia/include/NVGPUToLLVM/CMakeLists.txt +++ b/third_party/nvidia/include/NVGPUToLLVM/CMakeLists.txt @@ -1,3 +1,3 @@ set(LLVM_TARGET_DEFINITIONS Passes.td) -mlir_tablegen(Passes.h.inc -gen-pass-decls --name NVGPUToLLVM) -add_public_tablegen_target(NVGPUConversionPassIncGen) +mlir_tablegen(Passes.h.inc -gen-pass-decls --name NVGToLLVM) +add_public_tablegen_target(NVGConversionPassIncGen) diff --git a/third_party/nvidia/include/NVGPUToLLVM/NVGPUToLLVMPass.h b/third_party/nvidia/include/NVGPUToLLVM/NVGPUToLLVMPass.h index 90280967555b..53118e553e27 100644 --- a/third_party/nvidia/include/NVGPUToLLVM/NVGPUToLLVMPass.h +++ b/third_party/nvidia/include/NVGPUToLLVM/NVGPUToLLVMPass.h @@ -1,5 +1,5 @@ -#ifndef TRITON_CONVERSION_NVGPU_TO_LLVM_PASS_H -#define TRITON_CONVERSION_NVGPU_TO_LLVM_PASS_H +#ifndef TRITON_CONVERSION_NVG_TO_LLVM_PASS_H +#define TRITON_CONVERSION_NVG_TO_LLVM_PASS_H #include #include @@ -17,7 +17,7 @@ template class OperationPass; namespace triton { -namespace nvgpu { +namespace nvg { using Constraints = std::vector; using OperandsAndConstraints = std::vector>; @@ -28,7 +28,7 @@ rewriteAsPtxAsm(mlir::Operation *op, mlir::PatternRewriter &rewriter, const OperandsAndConstraints &operandsAndConstraints = {}, const Constraints &outputConstraints = {}); -} // namespace nvgpu +} // namespace nvg } // namespace triton diff --git a/third_party/nvidia/include/NVGPUToLLVM/Passes.h b/third_party/nvidia/include/NVGPUToLLVM/Passes.h index 6a0910d2a07b..ad9a54b6916b 100644 --- a/third_party/nvidia/include/NVGPUToLLVM/Passes.h +++ b/third_party/nvidia/include/NVGPUToLLVM/Passes.h @@ -1,18 +1,18 @@ -#ifndef NVGPU_CONVERSION_PASSES_H -#define NVGPU_CONVERSION_PASSES_H +#ifndef NVG_CONVERSION_PASSES_H +#define 
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
 #include "mlir/Pass/Pass.h"
-#include "nvidia/include/NVGPUToLLVM/NVGPUToLLVMPass.h"
+#include "nvidia/include/NVGToLLVM/NVGToLLVMPass.h"
 namespace mlir {
 namespace triton {
 #define GEN_PASS_DECL
-#include "nvidia/include/NVGPUToLLVM/Passes.h.inc"
+#include "nvidia/include/NVGToLLVM/Passes.h.inc"
 #define GEN_PASS_REGISTRATION
-#include "nvidia/include/NVGPUToLLVM/Passes.h.inc"
+#include "nvidia/include/NVGToLLVM/Passes.h.inc"
 } // namespace triton
 } // namespace mlir
diff --git a/third_party/nvidia/include/NVGPUToLLVM/Passes.td b/third_party/nvidia/include/NVGPUToLLVM/Passes.td
index 2fe74401a4ae..cf991f8e836e 100644
--- a/third_party/nvidia/include/NVGPUToLLVM/Passes.td
+++ b/third_party/nvidia/include/NVGPUToLLVM/Passes.td
@@ -1,10 +1,10 @@
-#ifndef NVGPU_CONVERSION_PASSES
-#define NVGPU_CONVERSION_PASSES
+#ifndef NVG_CONVERSION_PASSES
+#define NVG_CONVERSION_PASSES
 include "mlir/Pass/PassBase.td"
-def ConvertNVGPUToLLVM : Pass<"convert-nv-gpu-to-llvm", "mlir::ModuleOp"> {
-  let summary = "Convert NVGPU to LLVM";
+def ConvertNVGToLLVM : Pass<"convert-nv-gpu-to-llvm", "mlir::ModuleOp"> {
+  let summary = "Convert NVG to LLVM";
   let description = [{
   }];
@@ -12,7 +12,7 @@ def ConvertNVGPUToLLVM : Pass<"convert-nv-gpu-to-llvm", "mlir::ModuleOp"> {
   let dependentDialects = ["mlir::arith::ArithDialect",
                            "mlir::LLVM::LLVMDialect",
                            "mlir::NVVM::NVVMDialect",
-                           "mlir::triton::nvgpu::NVGPUDialect"];
+                           "mlir::triton::nvg::NVGDialect"];
 }
-#endif // NVGPU_CONVERSION_PASSES
+#endif // NVG_CONVERSION_PASSES
diff --git a/third_party/nvidia/include/TritonNVIDIAGPUToLLVM/Passes.td b/third_party/nvidia/include/TritonNVIDIAGPUToLLVM/Passes.td
index c30bec368c46..475e5ae3540f 100644
--- a/third_party/nvidia/include/TritonNVIDIAGPUToLLVM/Passes.td
+++ b/third_party/nvidia/include/TritonNVIDIAGPUToLLVM/Passes.td
@@ -17,7 +17,7 @@ def ConvertTritonGPUToLLVM : Pass<"convert-triton-gpu-to-llvm", "mlir::ModuleOp"
                            "mlir::triton::TritonDialect",
                            "mlir::triton::gpu::TritonGPUDialect",
                            "mlir::triton::nvidia_gpu::TritonNvidiaGPUDialect",
-                           "mlir::triton::nvgpu::NVGPUDialect",
+                           "mlir::triton::nvg::NVGDialect",
                            "mlir::NVVM::NVVMDialect"];
   let options = [
diff --git a/third_party/nvidia/lib/CMakeLists.txt b/third_party/nvidia/lib/CMakeLists.txt
index 2ef7aab10682..14275b6c827b 100644
--- a/third_party/nvidia/lib/CMakeLists.txt
+++ b/third_party/nvidia/lib/CMakeLists.txt
@@ -1,3 +1,3 @@
 add_subdirectory(Dialect)
 add_subdirectory(TritonNVIDIAGPUToLLVM)
-add_subdirectory(NVGPUToLLVM)
+add_subdirectory(NVGToLLVM)
diff --git a/third_party/nvidia/lib/Dialect/CMakeLists.txt b/third_party/nvidia/lib/Dialect/CMakeLists.txt
index 221301530ed3..d04c49a4e39a 100644
--- a/third_party/nvidia/lib/Dialect/CMakeLists.txt
+++ b/third_party/nvidia/lib/Dialect/CMakeLists.txt
@@ -1,2 +1,2 @@
-add_subdirectory(NVGPU)
+add_subdirectory(NVG)
 add_subdirectory(NVWS)
diff --git a/third_party/nvidia/lib/Dialect/NVGPU/IR/CMakeLists.txt b/third_party/nvidia/lib/Dialect/NVGPU/IR/CMakeLists.txt
index 1fd118d2be99..45a7fae2888d 100644
--- a/third_party/nvidia/lib/Dialect/NVGPU/IR/CMakeLists.txt
+++ b/third_party/nvidia/lib/Dialect/NVGPU/IR/CMakeLists.txt
@@ -1,9 +1,9 @@
-add_triton_library(NVGPUIR
+add_triton_library(NVGIR
   Dialect.cpp
   DEPENDS
-  NVGPUTableGen
-  NVGPUAttrDefsIncGen
+  NVGTableGen
+  NVGAttrDefsIncGen
   LINK_LIBS PUBLIC
   MLIRLLVMDialect
diff --git a/third_party/nvidia/lib/Dialect/NVGPU/IR/Dialect.cpp b/third_party/nvidia/lib/Dialect/NVGPU/IR/Dialect.cpp
index f623f50c63d0..c0432a8ba4e4 100644
--- a/third_party/nvidia/lib/Dialect/NVGPU/IR/Dialect.cpp
+++ b/third_party/nvidia/lib/Dialect/NVGPU/IR/Dialect.cpp
@@ -25,25 +25,25 @@
 #include "mlir/IR/OpImplementation.h"
 // clang-format off
-#include "Dialect/NVGPU/IR/Dialect.h"
-#include "Dialect/NVGPU/IR/Dialect.cpp.inc"
+#include "Dialect/NVG/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.cpp.inc"
 // clang-format on
 using namespace mlir;
-using namespace mlir::triton::nvgpu;
+using namespace mlir::triton::nvg;
-void mlir::triton::nvgpu::NVGPUDialect::initialize() {
+void mlir::triton::nvg::NVGDialect::initialize() {
   addAttributes<
 #define GET_ATTRDEF_LIST
-#include "Dialect/NVGPU/IR/NVGPUAttrDefs.cpp.inc"
+#include "Dialect/NVG/IR/NVGAttrDefs.cpp.inc"
       >();
   addOperations<
 #define GET_OP_LIST
-#include "Dialect/NVGPU/IR/Ops.cpp.inc"
+#include "Dialect/NVG/IR/Ops.cpp.inc"
       >();
 }
 #define GET_OP_CLASSES
-#include "Dialect/NVGPU/IR/Ops.cpp.inc"
-#include "Dialect/NVGPU/IR/OpsEnums.cpp.inc"
+#include "Dialect/NVG/IR/Ops.cpp.inc"
+#include "Dialect/NVG/IR/OpsEnums.cpp.inc"
diff --git a/third_party/nvidia/lib/NVGPUToLLVM/CMakeLists.txt b/third_party/nvidia/lib/NVGPUToLLVM/CMakeLists.txt
index 9d7ae73931cb..61351ff69017 100644
--- a/third_party/nvidia/lib/NVGPUToLLVM/CMakeLists.txt
+++ b/third_party/nvidia/lib/NVGPUToLLVM/CMakeLists.txt
@@ -1,9 +1,9 @@
-add_triton_library(NVGPUToLLVM
-  NVGPUToLLVMPass.cpp
+add_triton_library(NVGToLLVM
+  NVGToLLVMPass.cpp
   DEPENDS
-  NVGPUConversionPassIncGen
+  NVGConversionPassIncGen
   LINK_LIBS PUBLIC
-  NVGPUIR
+  NVGIR
 )
diff --git a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp b/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp
index 8c7dfc777105..af321be50a5a 100644
--- a/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp
+++ b/third_party/nvidia/lib/NVGPUToLLVM/NVGPUToLLVMPass.cpp
@@ -1,7 +1,7 @@
-#include "NVGPUToLLVM/NVGPUToLLVMPass.h"
-#include "NVGPUToLLVM/Passes.h"
+#include "NVGToLLVM/NVGToLLVMPass.h"
+#include "NVGToLLVM/Passes.h"
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
 #include "mlir/Dialect/LLVMIR/NVVMDialect.h"
 #include "mlir/IR/PatternMatch.h"
@@ -11,15 +11,15 @@
 #include "nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.h"
 #include "llvm/Support/ErrorHandling.h"
-namespace ttn = mlir::triton::nvgpu;
+namespace ttn = mlir::triton::nvg;
 using ttn::Constraints;
 using ttn::OperandsAndConstraints;
 namespace mlir {
 namespace triton {
-#define GEN_PASS_DEF_CONVERTNVGPUTOLLVM
-#include "NVGPUToLLVM/Passes.h.inc"
+#define GEN_PASS_DEF_CONVERTNVGTOLLVM
+#include "NVGToLLVM/Passes.h.inc"
 namespace {
@@ -72,7 +72,7 @@ Value convertToType(Value val, std::string constraint, Location loc,
 }
 SmallVector<PTXBuilder::Operand *>
-getPtxOutputs(const nvgpu::Constraints &outputConstraints,
+getPtxOutputs(const nvg::Constraints &outputConstraints,
               PTXBuilder &ptxBuilder) {
   SmallVector<PTXBuilder::Operand *> ptxOutputs;
   for (unsigned i = 0; i < outputConstraints.size(); i++) {
@@ -169,9 +169,9 @@ std::string patchPtxAsm(Operation *op, std::string ptxAsm) {
 }
 template <typename SourceOp>
-class NVGPUOpGenericPattern : public OpRewritePattern<SourceOp> {
+class NVGOpGenericPattern : public OpRewritePattern<SourceOp> {
 public:
-  explicit NVGPUOpGenericPattern(MLIRContext *context, std::string ptxAsm,
+  explicit NVGOpGenericPattern(MLIRContext *context, std::string ptxAsm,
                                  Constraints outputConstraints,
                                  Constraints inputConstraints)
       : OpRewritePattern<SourceOp>(context), ptxAsm(std::move(ptxAsm)),
@@ -270,11 +270,11 @@ class LoadAcquireOpPattern : public OpRewritePattern<ttn::LoadAcquireOp> {
     auto &ld =
ptxBuilder.create<>("ld") ->global() - .o("cta", op.getScope() == triton::nvgpu::MemSyncScope::CTA) - .o("gpu", op.getScope() == triton::nvgpu::MemSyncScope::GPU) - .o("sys", op.getScope() == triton::nvgpu::MemSyncScope::SYSTEM) - .o("acquire", op.getSem() == triton::nvgpu::MemSemantic::ACQUIRE) - .o("relaxed", op.getSem() == triton::nvgpu::MemSemantic::RELAXED) + .o("cta", op.getScope() == triton::nvg::MemSyncScope::CTA) + .o("gpu", op.getScope() == triton::nvg::MemSyncScope::GPU) + .o("sys", op.getScope() == triton::nvg::MemSyncScope::SYSTEM) + .o("acquire", op.getSem() == triton::nvg::MemSemantic::ACQUIRE) + .o("relaxed", op.getSem() == triton::nvg::MemSemantic::RELAXED) .b(width); ld(dstOpr, addrOpr).maybePredicate(op.getMask(), "b"); @@ -619,11 +619,11 @@ static void lowerTensorMemoryAlloc(ModuleOp mod) { } // anonymous namespace -class ConvertNVGPUToLLVM - : public impl::ConvertNVGPUToLLVMBase { +class ConvertNVGToLLVM + : public impl::ConvertNVGToLLVMBase { public: - using impl::ConvertNVGPUToLLVMBase< - ConvertNVGPUToLLVM>::ConvertNVGPUToLLVMBase; + using impl::ConvertNVGToLLVMBase< + ConvertNVGToLLVM>::ConvertNVGToLLVMBase; void runOnOperation() override { MLIRContext *context = &getContext(); @@ -642,7 +642,7 @@ class ConvertNVGPUToLLVM }; LogicalResult -nvgpu::rewriteAsPtxAsm(Operation *op, PatternRewriter &rewriter, +nvg::rewriteAsPtxAsm(Operation *op, PatternRewriter &rewriter, std::string ptxAsm, const OperandsAndConstraints &operandsAndConstraints, const Constraints &outputConstraints) { diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/CMakeLists.txt b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/CMakeLists.txt index cd84bf57df6f..247a40768abd 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/CMakeLists.txt +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/CMakeLists.txt @@ -23,13 +23,13 @@ add_triton_library(TritonNVIDIAGPUToLLVM DEPENDS TritonNVIDIAGPUConversionPassIncGen - NVGPUAttrDefsIncGen + NVGAttrDefsIncGen LINK_LIBS PUBLIC TritonAnalysis TritonGPUToLLVM TritonInstrumentToLLVM MLIRReconcileUnrealizedCasts - NVGPUIR + NVGIR MLIRUBToLLVM ) diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ClusterOpsToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ClusterOpsToLLVM.cpp index 2de9979ad52f..6b369a6b8789 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ClusterOpsToLLVM.cpp +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/ClusterOpsToLLVM.cpp @@ -21,7 +21,7 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include "Dialect/NVGPU/IR/Dialect.h" +#include "Dialect/NVG/IR/Dialect.h" #include "PatternTritonGPUOpToLLVM.h" #include "mlir/Conversion/LLVMCommon/Pattern.h" #include "mlir/Dialect/LLVMIR/NVVMDialect.h" diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp index 7cdf034978d2..b872ff6fd889 100644 --- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp +++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM.cpp @@ -135,7 +135,7 @@ struct WarpGroupDotWaitOpConversion Location loc = op.getLoc(); ValueRange inputs = adaptor.getInputs(); if (inputs.size() == 1) { - rewriter.replaceOpWithNewOp( + rewriter.replaceOpWithNewOp( op, inputs.front(), pendings); return success(); } @@ -160,7 +160,7 @@ struct WarpGroupDotWaitOpConversion } } Value packedOutput = - rewriter.create(loc, packed, pendings); + rewriter.create(loc, packed, pendings); // Unpack the output into the original struct types. 
     SmallVector<Value> outputs;
     outputStructIndex = 0;
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAHelpers.h b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAHelpers.h
index a9fd01d0b416..4b1430653194 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAHelpers.h
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAHelpers.h
@@ -130,7 +130,7 @@ class DotOpMmaSmemLoader : public DotOpMmaMemLoader {
     bases[kWarp][1] = {0, 0};
     auto warpGroupToOffsetb128 = LinearLayout(
         bases, warpToOffset.getOutDims(), /*requireSurjective=*/false);
-    Value warpId = rewriter.create<triton::nvgpu::WarpIdOp>(loc);
+    Value warpId = rewriter.create<triton::nvg::WarpIdOp>(loc);
     Value warpStrideb128 =
         applyLinearLayout(loc, rewriter, warpGroupToOffsetb128,
                           {{kWarp, warpId}})[0]
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAv5.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAv5.cpp
index f50afe2fb5d6..111b30a05f8a 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAv5.cpp
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/MMAv5.cpp
@@ -1,4 +1,4 @@
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "MMAHelpers.h"
 #include "PatternTritonGPUOpToLLVM.h"
 #include "Utility.h"
@@ -389,7 +389,7 @@ void convertDotImpl(const LLVMTypeConverter &typeConverter,
   // Only run mma on one thread. We currently use elect as ptxas is not able to
   // detect that tid.x == 0 is true only for 1 thread.
-  Value warpId = rewriter.create<triton::nvgpu::WarpIdOp>(loc);
+  Value warpId = rewriter.create<triton::nvg::WarpIdOp>(loc);
   Value isWarp0 = tb.icmp_eq(warpId, tb.i32_val(0));
   if (twoCTAs) {
     // TODO: we have to sync the two CTAs because we currently don't use remove
@@ -397,7 +397,7 @@
     rewriter.create<triton::nvidia_gpu::ClusterArriveOp>(loc, false);
     rewriter.create<triton::nvidia_gpu::ClusterWaitOp>(loc);
-    Value clusterId = rewriter.create<triton::nvgpu::ClusterCTAIdOp>(loc);
+    Value clusterId = rewriter.create<triton::nvg::ClusterCTAIdOp>(loc);
     Value cluster0 = tb.icmp_eq(clusterId, tb.i32_val(0));
     pred = tb.and_(pred, cluster0);
   }
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp
index c8a54a834bb0..a11e170bf825 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/DotOpToLLVM/WGMMA.cpp
@@ -36,33 +36,33 @@ using ::mlir::triton::gpu::MemDescType;
 using ::mlir::triton::gpu::NvidiaMmaEncodingAttr;
 using ::mlir::triton::gpu::SharedEncodingTrait;
-triton::nvgpu::WGMMAEltType getMmaRetType(Value d) {
+triton::nvg::WGMMAEltType getMmaRetType(Value d) {
   auto dTy = cast<RankedTensorType>(d.getType()).getElementType();
   if (dTy.isF32()) {
-    return triton::nvgpu::WGMMAEltType::f32;
+    return triton::nvg::WGMMAEltType::f32;
   } else if (dTy.isF16()) {
-    return triton::nvgpu::WGMMAEltType::f16;
+    return triton::nvg::WGMMAEltType::f16;
   } else if (dTy.isInteger(32)) {
-    return triton::nvgpu::WGMMAEltType::s32;
+    return triton::nvg::WGMMAEltType::s32;
   } else {
     llvm::report_fatal_error("Unsupported mma result type found");
   }
 }
-triton::nvgpu::WGMMAEltType getMmaOperandType(Value a, bool allowTF32) {
+triton::nvg::WGMMAEltType getMmaOperandType(Value a, bool allowTF32) {
   auto aTy = cast<triton::gpu::TensorOrMemDesc>(a.getType()).getElementType();
   if (aTy.isF16()) {
-    return triton::nvgpu::WGMMAEltType::f16;
+    return triton::nvg::WGMMAEltType::f16;
   } else if (aTy.isBF16()) {
-    return triton::nvgpu::WGMMAEltType::bf16;
+    return triton::nvg::WGMMAEltType::bf16;
   } else if (aTy.isF32() && allowTF32) {
-    return triton::nvgpu::WGMMAEltType::tf32;
+    return triton::nvg::WGMMAEltType::tf32;
   } else if (aTy.isInteger(8)) {
-    return triton::nvgpu::WGMMAEltType::s8;
+    return triton::nvg::WGMMAEltType::s8;
   } else if (llvm::isa<Float8E5M2Type>(aTy)) {
-    return triton::nvgpu::WGMMAEltType::e5m2;
+    return triton::nvg::WGMMAEltType::e5m2;
   } else if (llvm::isa<Float8E4M3FNType>(aTy)) {
-    return triton::nvgpu::WGMMAEltType::e4m3;
+    return triton::nvg::WGMMAEltType::e4m3;
   } else {
     llvm::report_fatal_error("Unsupported mma operand type found");
   }
@@ -173,7 +173,7 @@ static SmallVector<Value> emitWait(ConversionPatternRewriter &rewriter,
   for (Value v : acc) {
     llvmStruct = b.insert_val(structTy, llvmStruct, v, i++);
   }
-  Value res = rewriter.create<triton::nvgpu::WGMMAWaitGroupOp>(loc, llvmStruct,
+  Value res = rewriter.create<triton::nvg::WGMMAWaitGroupOp>(loc, llvmStruct,
                                                                pendings);
   SmallVector<Value> results;
   for (int i = 0; i < acc.size(); ++i) {
@@ -235,14 +235,14 @@ LogicalResult convertDot(const LLVMTypeConverter *typeConverter,
   auto fc = unpackLLElements(loc, loadedC, rewriter);
-  triton::nvgpu::WGMMAEltType eltTypeC = getMmaRetType(d);
-  triton::nvgpu::WGMMAEltType eltTypeA = getMmaOperandType(a, allowTF32);
-  triton::nvgpu::WGMMAEltType eltTypeB = getMmaOperandType(b, allowTF32);
+  triton::nvg::WGMMAEltType eltTypeC = getMmaRetType(d);
+  triton::nvg::WGMMAEltType eltTypeA = getMmaOperandType(a, allowTF32);
+  triton::nvg::WGMMAEltType eltTypeB = getMmaOperandType(b, allowTF32);
-  triton::nvgpu::WGMMALayout layoutA = transA ? triton::nvgpu::WGMMALayout::col
-                                              : triton::nvgpu::WGMMALayout::row;
-  triton::nvgpu::WGMMALayout layoutB = transB ? triton::nvgpu::WGMMALayout::row
-                                              : triton::nvgpu::WGMMALayout::col;
+  triton::nvg::WGMMALayout layoutA = transA ? triton::nvg::WGMMALayout::col
+                                            : triton::nvg::WGMMALayout::row;
+  triton::nvg::WGMMALayout layoutB = transB ? triton::nvg::WGMMALayout::row
+                                            : triton::nvg::WGMMALayout::col;
   auto func = op->getParentOfType<LLVM::LLVMFuncOp>();
   Operation *startSequence = rewriter.create<NVVM::WgmmaFenceAlignedOp>(loc);
@@ -294,7 +294,7 @@ LogicalResult convertDot(const LLVMTypeConverter *typeConverter,
       needsPartialAccumulator &&
       (numLowPrecisionAcc >= maxNumImpreciseAcc || k == numRepK - 1);
   Value mmaAcc = needsPartialAccumulator ? partialAcc : d;
-      mmaAcc = rewriter.create<triton::nvgpu::WGMMAOp>(
+      mmaAcc = rewriter.create<triton::nvg::WGMMAOp>(
           loc, accTy, a, b, useC, mmaAcc, M, N, K, eltTypeC, eltTypeA,
           eltTypeB, layoutA, layoutB);
       useC = tb.i1_val(1);
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp
index c860a05fac64..25b7f63a6da6 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/LoadStoreOpToLLVM.cpp
@@ -1,4 +1,4 @@
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "TargetInfo.h"
 #include "mlir/Conversion/LLVMCommon/TypeConverter.h"
 #include "mlir/Dialect/LLVMIR/NVVMDialect.h"
@@ -819,12 +819,12 @@ struct AtomicRMWOpConversion
     SmallVector<Value> resultVals(elemsPerThread);
     // Lower AtomicRMWOp to a ld.acquire if possible
-    std::unordered_map<triton::MemSyncScope, triton::nvgpu::MemSyncScope>
+    std::unordered_map<triton::MemSyncScope, triton::nvg::MemSyncScope>
        ScopeMap = {
-            {triton::MemSyncScope::CTA, triton::nvgpu::MemSyncScope::CTA},
-            {triton::MemSyncScope::GPU, triton::nvgpu::MemSyncScope::GPU},
+            {triton::MemSyncScope::CTA, triton::nvg::MemSyncScope::CTA},
+            {triton::MemSyncScope::GPU, triton::nvg::MemSyncScope::GPU},
             {triton::MemSyncScope::SYSTEM,
-             triton::nvgpu::MemSyncScope::SYSTEM}};
+             triton::nvg::MemSyncScope::SYSTEM}};
     const bool doPTXLDPromotion = isPromotableToNVPTXLD(op) && vec == 1 &&
                                   packed == 1 && ScopeMap.count(op.getScope());
@@ -845,11 +845,11 @@ struct AtomicRMWOpConversion
     if (doPTXLDPromotion) {
       Type convertedValueTy =
           getTypeConverter()->convertType(getElementTypeOrSelf(op.getType()));
-      auto loadAcquireOp = rewriter.create<triton::nvgpu::LoadAcquireOp>(
+      auto loadAcquireOp = rewriter.create<triton::nvg::LoadAcquireOp>(
           op.getLoc(), convertedValueTy, rmwPtr, pred,
           op.getSem() == triton::MemSemantic::ACQUIRE
-              ? triton::nvgpu::MemSemantic::ACQUIRE
-              : triton::nvgpu::MemSemantic::RELAXED,
+              ? triton::nvg::MemSemantic::ACQUIRE
+              : triton::nvg::MemSemantic::RELAXED,
           ScopeMap[op.getScope()]);
       auto ASMReturnTy = void_ty(ctx);
@@ -1338,7 +1338,7 @@ struct AsyncTMACopyGlobalToLocalOpConversion
     auto mod = op->getParentOfType<ModuleOp>();
     int numWarps = ttg::lookupNumWarps(op);
     int warpSize = ttg::TritonGPUDialect::getThreadsPerWarp(mod);
-    Value warpID = rewriter.create<triton::nvgpu::WarpIdOp>(loc);
+    Value warpID = rewriter.create<triton::nvg::WarpIdOp>(loc);
     Value pred = adaptor.getPred();
     // Select just one thread for the TMA copy. This also helps the compiler to
     // figure out that the op is uniform.
@@ -1364,7 +1364,7 @@ struct AsyncTMACopyGlobalToLocalOpConversion
     auto kBlock = str_attr("block");
     const auto numCopies = msgToOffset.getInDimSize(kMsg);
     auto zero = b.i32_val(0);
-    auto ctaId = rewriter.create<triton::nvgpu::ClusterCTAIdOp>(loc);
+    auto ctaId = rewriter.create<triton::nvg::ClusterCTAIdOp>(loc);
     // The bounding box inner dimension must be less than or equal to the
     // swizzle size.
@@ -1440,7 +1440,7 @@ LogicalResult convertTMAStoreLikeOp(Operation *op,
   auto mod = op->getParentOfType<ModuleOp>();
   int numWarps = ttg::lookupNumWarps(op);
   int warpSize = ttg::TritonGPUDialect::getThreadsPerWarp(mod);
-  Value warpID = rewriter.create<triton::nvgpu::WarpIdOp>(loc);
+  Value warpID = rewriter.create<triton::nvg::WarpIdOp>(loc);
   auto shapePerCTA = ttg::getShapePerCTA(srcTy);
   int elementsPerCTA = product(shapePerCTA);
@@ -1457,7 +1457,7 @@ LogicalResult convertTMAStoreLikeOp(Operation *op,
   auto kBlock = str_attr("block");
   auto numCopies = msgToOffset.getInDimSize(kMsg);
   auto zero = b.i32_val(0);
-  auto ctaId = rewriter.create<triton::nvgpu::ClusterCTAIdOp>(loc);
+  auto ctaId = rewriter.create<triton::nvg::ClusterCTAIdOp>(loc);
   for (int copyIdx = 0; copyIdx < numCopies; copyIdx += numWarps) {
     int numWarpsToCopy = std::min(numCopies - copyIdx, numWarps);
@@ -1650,8 +1650,8 @@ static LogicalResult iterateGatherScatterIndices(
   if (freeVars[kLane] != (threadsPerWarp - 1))
     return op->emitError("x offsets must be broadcasted across each warp");
-  Value warpId = rewriter.create<triton::nvgpu::WarpIdOp>(loc);
-  Value blockId = rewriter.create<triton::nvgpu::ClusterCTAIdOp>(loc);
+  Value warpId = rewriter.create<triton::nvg::WarpIdOp>(loc);
+  Value blockId = rewriter.create<triton::nvg::ClusterCTAIdOp>(loc);
   // Mask out warps with redundant x offsets.
   pred = b.and_(pred,
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/MemoryOpToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/MemoryOpToLLVM.cpp
index 7f057ff5fbbe..40a86d403447 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/MemoryOpToLLVM.cpp
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/MemoryOpToLLVM.cpp
@@ -1,4 +1,4 @@
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "PatternTritonGPUOpToLLVM.h"
 #include "TargetInfo.h"
 #include "Utility.h"
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TargetInfo.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TargetInfo.cpp
index bd661f26a74a..b2aafc6276ad 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TargetInfo.cpp
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TargetInfo.cpp
@@ -1,5 +1,5 @@
 #include "TargetInfo.h"
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "TritonNVIDIAGPUToLLVM/PTXAsmFormat.h"
 #include "Utility.h"
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
@@ -130,7 +130,7 @@ bool TargetInfo::supportMaximumMinimum() const {
 }
 Value TargetInfo::getClusterCTAId(RewriterBase &rewriter, Location loc) const {
-  return rewriter.create<triton::nvgpu::ClusterCTAIdOp>(loc,
+  return rewriter.create<triton::nvg::ClusterCTAIdOp>(loc,
                                                         rewriter.getI32Type());
 }
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TensorMemoryToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TensorMemoryToLLVM.cpp
index 7336013eed97..ba7f53ed0054 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TensorMemoryToLLVM.cpp
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TensorMemoryToLLVM.cpp
@@ -1,4 +1,4 @@
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "DotOpToLLVM/MMAHelpers.h"
 #include "PatternTritonGPUOpToLLVM.h"
 #include "TritonNVIDIAGPUToLLVM/PTXAsmFormat.h"
@@ -294,7 +294,7 @@ SmallVector<Value> lowerTMemLdSt(Location loc,
     return std::make_pair(std::get<1>(rowCol[0]), std::get<1>(rowCol[1]));
   };
-  Value warpId = rewriter.create<triton::nvgpu::WarpIdOp>(loc);
+  Value warpId = rewriter.create<triton::nvg::WarpIdOp>(loc);
   // Map warpId to rows 32 and 64
   auto warpIdInGroup = b.and_(warpId, b.i32_val(3));
   tmemBase = b.add(tmemBase, b.shl(warpIdInGroup, b.i32_val(5 + 16)));
@@ -481,7 +481,7 @@ struct TensorMemoryAllocOpConversion
     Location loc = op->getLoc();
     auto b = TritonLLVMOpBuilder(loc, rewriter);
     auto ctx = op.getContext();
-    Value base = rewriter.create<triton::nvgpu::TensorMemoryBaseAddress>(loc);
+    Value base = rewriter.create<triton::nvg::TensorMemoryBaseAddress>(loc);
     Value baseInt = b.ptrtoint(i32_ty, base);
     int colOffset = cast<IntegerAttr>(op->getAttr("tensor_memory_col_offset"))
                         .getValue()
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TritonGPUToLLVM.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TritonGPUToLLVM.cpp
index 7b4e3f0204bf..854eb283a359 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TritonGPUToLLVM.cpp
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/TritonGPUToLLVM.cpp
@@ -1,4 +1,4 @@
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "TritonNVIDIAGPUToLLVM/Passes.h"
 #include "TritonNVIDIAGPUToLLVM/Utility.h"
 #include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
@@ -53,7 +53,7 @@ class TritonLLVMConversionTarget : public ConversionTarget {
     addLegalDialect();
     addLegalDialect();
     addLegalDialect();
-    addLegalDialect<triton::nvgpu::NVGPUDialect>();
+    addLegalDialect<triton::nvg::NVGDialect>();
     addIllegalDialect();
     addIllegalDialect();
     addIllegalDialect();
@@ -191,7 +191,7 @@ struct ConvertTritonGPUToLLVM
     // Fold CTAId when there is only 1 CTA.
     int numCTAs = triton::gpu::TritonGPUDialect::getNumCTAs(mod);
     if (numCTAs == 1) {
-      mod.walk([](triton::nvgpu::ClusterCTAIdOp id) {
+      mod.walk([](triton::nvg::ClusterCTAIdOp id) {
        OpBuilder b(id);
        Value zero = LLVM::createConstantI32(id->getLoc(), b, 0);
        id.replaceAllUsesWith(zero);
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.cpp b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.cpp
index 593adbc7508a..0c9db13ee344 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.cpp
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.cpp
@@ -1,5 +1,5 @@
 #include "Utility.h"
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "mlir/Dialect/LLVMIR/NVVMDialect.h"
 #include "triton/Conversion/TritonGPUToLLVM/Utility.h"
 #include "triton/Tools/LayoutUtils.h"
diff --git a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.h b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.h
index ce69f44487c4..6d23c312ecb3 100644
--- a/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.h
+++ b/third_party/nvidia/lib/TritonNVIDIAGPUToLLVM/Utility.h
@@ -8,7 +8,7 @@
 #include "TargetInfo.h"
 #include "mlir/Conversion/LLVMCommon/Pattern.h"
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
-#include "third_party/nvidia/include/Dialect/NVGPU/IR/Dialect.h"
+#include "third_party/nvidia/include/Dialect/NVG/IR/Dialect.h"
 #include "triton/Analysis/Utility.h"
 #include "triton/Conversion/MLIRTypes.h"
 #include "triton/Dialect/TritonNvidiaGPU/IR/Dialect.h"
diff --git a/third_party/nvidia/triton_nvidia.cc b/third_party/nvidia/triton_nvidia.cc
index b256f112c8e2..c98469543826 100644
--- a/third_party/nvidia/triton_nvidia.cc
+++ b/third_party/nvidia/triton_nvidia.cc
@@ -1,6 +1,6 @@
-#include "Dialect/NVGPU/IR/Dialect.h"
+#include "Dialect/NVG/IR/Dialect.h"
 #include "Dialect/NVWS/IR/Dialect.h"
-#include "NVGPUToLLVM/Passes.h"
+#include "NVGToLLVM/Passes.h"
 #include "TritonNVIDIAGPUToLLVM/Passes.h"
 #include "cublas_instance.h"
 #include "mlir/Pass/PassManager.h"
@@ -48,7 +48,7 @@ createTritonGPUProxyFenceInsertionWrapper(int32_t capability) {
   return ttng::createTritonGPUProxyFenceInsertion(options);
 }
-void init_triton_nvidia_passes_ttnvgpuir(py::module &&m) {
+void init_triton_nvidia_passes_ttnvgir(py::module &&m) {
   ADD_PASS_WRAPPER_1("add_plan_cta", ttng::createTritonNvidiaGPUPlanCTAPass,
                      mlir::triton::nvidia_gpu::ClusterInfo *);
   ADD_PASS_WRAPPER_1("add_fence_insertion",
@@ -61,8 +61,8 @@ void init_triton_nvidia_passes_ttnvgpuir(py::module &&m) {
                      ttng::createTritonNvidiaGPUPromoteLHSToTMemPass);
   ADD_PASS_WRAPPER_0("add_remove_tmem_tokens",
                      ttng::createTritonNvidiaGPURemoveTMEMTokensPass);
-  ADD_PASS_WRAPPER_0("add_nvgpu_to_llvm",
-                     mlir::triton::createConvertNVGPUToLLVM);
+  ADD_PASS_WRAPPER_0("add_nvg_to_llvm",
+                     mlir::triton::createConvertNVGToLLVM);
   ADD_PASS_WRAPPER_0("add_warp_specialize_to_llvm",
                      mlir::triton::createConvertWarpSpecializeToLLVM);
   ADD_PASS_WRAPPER_0("add_allocate_tensor_memory",
@@ -90,7 +90,7 @@ void init_triton_nvidia_passes_nvws(py::module &&m) {
 void init_triton_hopper_passes(py::module &&m) {
   // Meta's autoWS
   ADD_PASS_OPTION_WRAPPER_2("add_hopper_warpspec",
-                            mlir::createNVGPUWarpSpecialization, int, bool);
+                            mlir::createNVGWarpSpecialization, int, bool);
 }
 static void checkMatmulConstraints(const std::string &A_dtype,
@@ -145,7 +145,7 @@ void init_triton_nvidia(py::module &&m) {
   auto passes = m.def_submodule("passes");
   init_triton_nvidia_passes_nvws(passes.def_submodule("nvws"));
   init_triton_nvidia_passes_ttgpuir(passes.def_submodule("ttgpuir"));
-  init_triton_nvidia_passes_ttnvgpuir(passes.def_submodule("ttnvgpuir"));
+  init_triton_nvidia_passes_ttnvgir(passes.def_submodule("ttnvgir"));
   init_triton_hopper_passes(passes.def_submodule("hopper"));
   // cluster info
@@ -168,7 +168,7 @@ void init_triton_nvidia(py::module &&m) {
   m.def("load_dialects", [](mlir::MLIRContext &context) {
     mlir::DialectRegistry registry;
-    registry.insert<mlir::triton::nvgpu::NVGPUDialect, mlir::triton::nvws::NVWSDialect>();
+    registry.insert<mlir::triton::nvg::NVGDialect, mlir::triton::nvws::NVWSDialect>();
     mlir::registerNVVMDialectTranslation(registry);
     context.appendDialectRegistry(registry);
diff --git a/third_party/nvidia/unittest/Conversion/TritonGPUToLLVM/CMakeLists.txt b/third_party/nvidia/unittest/Conversion/TritonGPUToLLVM/CMakeLists.txt
index 3c52f7c7a78d..5f0456ac83da 100644
--- a/third_party/nvidia/unittest/Conversion/TritonGPUToLLVM/CMakeLists.txt
+++ b/third_party/nvidia/unittest/Conversion/TritonGPUToLLVM/CMakeLists.txt
@@ -4,5 +4,5 @@ add_triton_ut(
   LIBS
   TritonGPUToLLVM
   TritonNVIDIAGPUToLLVM
-  NVGPUIR MLIRUBToLLVM
+  NVGIR MLIRUBToLLVM
 )
diff --git a/third_party/proton/README.md b/third_party/proton/README.md
index d0b5cd8a40ab..0a1d3ad09aad 100644
--- a/third_party/proton/README.md
+++ b/third_party/proton/README.md
@@ -364,7 +364,7 @@ This is because the number of kernels in a graph instance (i.e., `cuGraphExec`)
 - Instruction sampling
 If you encounter permission related problems when using instruction sampling, you can lookup this [page](https://developer.nvidia.com/nvidia-development-tools-solutions-err_nvgpuctrperm-permission-issue-performance-counters) for help.
 The overhead of instruction sampling on NVIDIA GPUs is about 20x using Proton because we haven't enabled continuous sampling yet. Continuous sampling can allow for more runtime optimizations, but it makes it more challenging to attribute performance data back to the GPU kernels because: (1) it enables profiling of concurrent kernels, (2) it doesn't allow profiling of time and instruction samples simultaneously, and (3) it works best if we have a separate thread dedicated to attributing instruction samples to the GPU kernels