29 changes: 29 additions & 0 deletions clang/include/clang/Basic/Builtins.td
@@ -4770,6 +4770,35 @@ def GetDeviceSideMangledName : LangBuiltin<"CUDA_LANG"> {
let Prototype = "char const*(...)";
}

// GPU intrinsics
class GPUBuiltin<string prototype> : Builtin {
Contributor:
We need a way to make these only emitted on 'gpu' compilations.

Collaborator (author):
Not particularly. Intrinsics aren't required to lower to non-error states on all targets anyway. If someone wants to use these to write x64 code with implied SIMT semantics and deal with that later, cool - e.g. ispc might like it.

let Spellings = ["__builtin_" # NAME];
let Prototype = prototype;
let Attributes = [NoThrow];
}

multiclass GPUGridBuiltin<string prototype> {
def _x : GPUBuiltin<prototype>;
def _y : GPUBuiltin<prototype>;
def _z : GPUBuiltin<prototype>;
}

defm gpu_num_blocks : GPUGridBuiltin<"uint32_t()">;
defm gpu_block_id : GPUGridBuiltin<"uint32_t()">;
defm gpu_num_threads : GPUGridBuiltin<"uint32_t()">;
defm gpu_thread_id : GPUGridBuiltin<"uint32_t()">;
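
Each defm above expands through GPUGridBuiltin into _x, _y and _z variants, so the "__builtin_" # NAME spelling rule yields names like __builtin_gpu_num_blocks_x, __builtin_gpu_block_id_y and __builtin_gpu_thread_id_z. As a minimal C sketch of how the expanded grid builtins could be used (the flattening arithmetic is the usual SIMT convention, not something this patch defines):

#include <stdint.h>

// Flattened global work-item index along x: the block's offset plus
// this thread's position within its block.
static inline uint32_t global_thread_id_x(void) {
  return __builtin_gpu_block_id_x() * __builtin_gpu_num_threads_x() +
         __builtin_gpu_thread_id_x();
}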

def gpu_ballot : GPUBuiltin<"uint64_t(uint64_t, bool)">;
def gpu_exit : GPUBuiltin<"void()">;
def gpu_lane_id : GPUBuiltin<"uint32_t()">;
def gpu_lane_mask : GPUBuiltin<"uint64_t()">;
def gpu_num_lanes : GPUBuiltin<"uint32_t()">;
def gpu_read_first_lane_u32 : GPUBuiltin<"uint32_t(uint64_t, uint32_t)">;
def gpu_shuffle_idx_u32 : GPUBuiltin<"uint32_t(uint64_t, uint32_t, uint32_t, uint32_t)">;
def gpu_sync_lane : GPUBuiltin<"void(uint64_t)">;
def gpu_sync_threads : GPUBuiltin<"void()">;
def gpu_thread_suspend : GPUBuiltin<"void()">;
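
To give a feel for the wave-level entries, a hedged sketch of counting the active lanes on which a predicate holds, assuming gpu_ballot(mask, pred) returns a bitmask of participating lanes where pred is true (the exact semantics come from the lowering, not from the prototypes above):

#include <stdbool.h>
#include <stdint.h>

// Hypothetical helper: popcount of the ballot taken over the current
// lane mask, i.e. how many active lanes voted true.
static inline uint32_t count_active_true(bool pred) {
  uint64_t mask = __builtin_gpu_lane_mask();
  uint64_t votes = __builtin_gpu_ballot(mask, pred);
  return (uint32_t)__builtin_popcountll(votes);
}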

// HLSL
def HLSLAddUint64: LangBuiltin<"HLSL_LANG"> {
let Spellings = ["__builtin_hlsl_adduint64"];
158 changes: 158 additions & 0 deletions clang/test/CodeGen/amdgpu-grid-builtins.c
@@ -0,0 +1,158 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm -O1 %s -o - | FileCheck %s

#include <stdint.h>

// CHECK-LABEL: define dso_local noundef i32 @workgroup_id_x(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.workgroup.id.x()
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t workgroup_id_x(void)
{
return __builtin_amdgcn_workgroup_id_x();
}

// CHECK-LABEL: define dso_local noundef i32 @workgroup_id_y(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.workgroup.id.y()
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t workgroup_id_y(void)
{
return __builtin_amdgcn_workgroup_id_y();
}

// CHECK-LABEL: define dso_local noundef i32 @workgroup_id_z(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR3:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.workgroup.id.z()
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t workgroup_id_z(void)
{
return __builtin_amdgcn_workgroup_id_z();
}

// CHECK-LABEL: define dso_local noundef range(i32 0, 1024) i32 @workitem_id_x(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR4:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.x()
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t workitem_id_x(void)
{
return __builtin_amdgcn_workitem_id_x();
}

// CHECK-LABEL: define dso_local noundef range(i32 0, 1024) i32 @workitem_id_y(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR5:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.y()
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t workitem_id_y(void)
{
return __builtin_amdgcn_workitem_id_y();
}

// CHECK-LABEL: define dso_local noundef range(i32 0, 1024) i32 @workitem_id_z(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR6:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call noundef range(i32 0, 1024) i32 @llvm.amdgcn.workitem.id.z()
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t workitem_id_z(void)
{
return __builtin_amdgcn_workitem_id_z();
}

// CHECK-LABEL: define dso_local range(i32 1, 1025) i32 @workgroup_size_x(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR7:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP0]], i64 12
// CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr addrspace(4) [[TMP1]], align 4, !range [[RNG3:![0-9]+]], !invariant.load [[META4:![0-9]+]], !noundef [[META4]]
// CHECK-NEXT: [[CONV:%.*]] = zext nneg i16 [[TMP2]] to i32
// CHECK-NEXT: ret i32 [[CONV]]
//
uint32_t workgroup_size_x(void)
{
return __builtin_amdgcn_workgroup_size_x();
}

// CHECK-LABEL: define dso_local range(i32 1, 1025) i32 @workgroup_size_y(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR7]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP0]], i64 14
// CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr addrspace(4) [[TMP1]], align 2, !range [[RNG3]], !invariant.load [[META4]], !noundef [[META4]]
// CHECK-NEXT: [[CONV:%.*]] = zext nneg i16 [[TMP2]] to i32
// CHECK-NEXT: ret i32 [[CONV]]
//
uint32_t workgroup_size_y(void)
{
return __builtin_amdgcn_workgroup_size_y();
}

// CHECK-LABEL: define dso_local range(i32 1, 1025) i32 @workgroup_size_z(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR7]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call align 8 dereferenceable(256) ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP0]], i64 16
// CHECK-NEXT: [[TMP2:%.*]] = load i16, ptr addrspace(4) [[TMP1]], align 8, !range [[RNG3]], !invariant.load [[META4]], !noundef [[META4]]
// CHECK-NEXT: [[CONV:%.*]] = zext nneg i16 [[TMP2]] to i32
// CHECK-NEXT: ret i32 [[CONV]]
//
uint32_t workgroup_size_z(void)
{
return __builtin_amdgcn_workgroup_size_z();
}

// CHECK-LABEL: define dso_local range(i32 1, 0) i32 @grid_size_x(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR8:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP0]], i64 12
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4, !range [[RNG5:![0-9]+]], !invariant.load [[META4]]
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t grid_size_x(void)
{
return __builtin_amdgcn_grid_size_x();
}

// CHECK-LABEL: define dso_local range(i32 1, 0) i32 @grid_size_y(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR8]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP0]], i64 16
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4, !range [[RNG5]], !invariant.load [[META4]]
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t grid_size_y(void)
{
return __builtin_amdgcn_grid_size_y();
}

// CHECK-LABEL: define dso_local range(i32 1, 0) i32 @grid_size_z(
// CHECK-SAME: ) local_unnamed_addr #[[ATTR8]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = tail call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr addrspace(4) [[TMP0]], i64 20
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr addrspace(4) [[TMP1]], align 4, !range [[RNG5]], !invariant.load [[META4]]
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t grid_size_z(void)
{
return __builtin_amdgcn_grid_size_z();
}

//.
// CHECK: [[RNG3]] = !{i16 1, i16 1025}
// CHECK: [[META4]] = !{}
// CHECK: [[RNG5]] = !{i32 1, i32 0}
//.
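
The ranges checked above match the HSA dispatch layout: workgroup sizes load as i16 values in [1, 1024], while grid sizes are nonzero i32 counts of work-items rather than workgroups. Assuming a dispatch whose grid is an exact multiple of the workgroup size, a hypothetical helper (not part of the test) could recover the workgroup count by division:

#include <stdint.h>

// Number of workgroups along x, assuming grid_size_x is an exact
// multiple of workgroup_size_x (otherwise a round-up divide is needed).
static uint32_t num_workgroups_x(void) {
  return __builtin_amdgcn_grid_size_x() / __builtin_amdgcn_workgroup_size_x();
}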