36 changes: 36 additions & 0 deletions clang/lib/Sema/SPIRVBuiltins.td
@@ -984,6 +984,42 @@ foreach name = ["GroupLogicalAndKHR", "GroupLogicalOrKHR"] in {
def : SPVBuiltin<name, [Bool, Int, Int, Bool], Attr.Convergent>;
}

def SubgroupShuffleINTELVecType
: GenericType<"SubgroupShuffleINTELVecType",
TypeList<[Char, UChar, Short, UShort, Int, UInt, Float]>,
VecNoScalar>;

foreach name = ["SubgroupShuffleINTEL", "SubgroupShuffleXorINTEL"] in {
def : SPVBuiltin<name, [AGenType1, AGenType1, UInt]>;
def : SPVBuiltin<name, [SubgroupShuffleINTELVecType,
SubgroupShuffleINTELVecType, UInt]>;
}

foreach name = ["SubgroupShuffleUpINTEL", "SubgroupShuffleDownINTEL"] in {
def : SPVBuiltin<name, [AGenType1, AGenType1, AGenType1, UInt]>;
def : SPVBuiltin<name, [SubgroupShuffleINTELVecType,
SubgroupShuffleINTELVecType,
SubgroupShuffleINTELVecType, UInt]>;
}

foreach name = ["SubgroupBlockWriteINTEL"] in {
foreach AS = [GlobalAS, LocalAS] in {
foreach Ty = TLUnsignedInts.List in {
def : SPVBuiltin<name, [Void, PointerType<Ty, AS>, Ty]>;
}
foreach Ty = [UChar, UShort] in {
foreach v = [2, 4, 8, 16] in {
def : SPVBuiltin<name, [Void, PointerType<Ty, AS>, VectorType<Ty, v>]>;
}
}
foreach Ty = [UInt, ULong] in {
foreach v = [2, 4, 8] in {
def : SPVBuiltin<name, [Void, PointerType<Ty, AS>, VectorType<Ty, v>]>;
}
}
}
}

// 3.56.24. Non-Uniform Instructions

foreach name = ["GroupNonUniformElect"] in {
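The TableGen entries above declare overloaded forms of the Intel subgroup builtins, so callers can use ordinary overloaded names (__spirv_SubgroupShuffleINTEL, __spirv_SubgroupShuffleXorINTEL, __spirv_SubgroupShuffleUpINTEL/DownINTEL, __spirv_SubgroupBlockWriteINTEL) instead of hand-mangled _Z... symbols. A minimal OpenCL C sketch of what these declarations permit, assuming the builtins are made visible to the translation unit (for example via clang's SPIR-V builtin lookup, -fdeclare-spirv-builtins); the kernel and variable names are illustrative only:

__kernel void shuffle_demo(__global float4 *out, __global uint *block_out) {
  uint lid = get_sub_group_local_id();

  // Vector overloads come from SubgroupShuffleINTELVecType; scalar overloads
  // come from the AGenType1 form.
  float4 v = (float4)(lid, lid + 1, lid + 2, lid + 3);
  float4 from_lane0 = __spirv_SubgroupShuffleINTEL(v, 0u);
  float4 from_below = __spirv_SubgroupShuffleUpINTEL(v, v, 1u);

  // Block write: each invocation contributes one element (or vector) through
  // a pointer that must be uniform across the subgroup.
  __spirv_SubgroupBlockWriteINTEL(block_out, lid);

  out[get_global_id(0)] = from_lane0 + from_below;
}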
252 changes: 252 additions & 0 deletions clang/test/CodeGenSPIRV/spirv-builtin-lookup-intel-subgroup.cpp

Large diffs are not rendered by default.

210 changes: 92 additions & 118 deletions libclc/libspirv/lib/amdgcn-amdhsa/group/collectives.cl
@@ -38,27 +38,6 @@ __clc__get_group_scratch_float() __asm("__clc__get_group_scratch_float");
__local double *
__clc__get_group_scratch_double() __asm("__clc__get_group_scratch_double");

#define __CLC_DECLARE_SHUFFLES(TYPE, TYPE_MANGLED) \
_CLC_DECL TYPE _Z28__spirv_SubgroupShuffleINTELI##TYPE_MANGLED##ET_S0_j( \
TYPE, int); \
_CLC_DECL TYPE \
_Z30__spirv_SubgroupShuffleUpINTELI##TYPE_MANGLED##ET_S0_S0_j( \
TYPE, TYPE, unsigned int);

__CLC_DECLARE_SHUFFLES(char, a);
__CLC_DECLARE_SHUFFLES(unsigned char, h);
__CLC_DECLARE_SHUFFLES(short, s);
__CLC_DECLARE_SHUFFLES(unsigned short, t);
__CLC_DECLARE_SHUFFLES(int, i);
__CLC_DECLARE_SHUFFLES(unsigned int, j);
__CLC_DECLARE_SHUFFLES(half, DF16_);
__CLC_DECLARE_SHUFFLES(float, f);
__CLC_DECLARE_SHUFFLES(long, l);
__CLC_DECLARE_SHUFFLES(unsigned long, m);
__CLC_DECLARE_SHUFFLES(double, d);

#undef __CLC_DECLARE_SHUFFLES

#define __CLC_APPEND(NAME, SUFFIX) NAME##SUFFIX

#define __CLC_ADD(x, y) (x + y)
@@ -71,22 +50,19 @@ __CLC_DECLARE_SHUFFLES(double, d);
#define __CLC_LOGICAL_OR(x, y) (x || y)
#define __CLC_LOGICAL_AND(x, y) (x && y)

#define __CLC_SUBGROUP_COLLECTIVE_BODY(OP, TYPE, TYPE_MANGLED, IDENTITY) \
#define __CLC_SUBGROUP_COLLECTIVE_BODY(OP, TYPE, IDENTITY) \
uint sg_lid = __spirv_SubgroupLocalInvocationId(); \
/* Can't use XOR/butterfly shuffles; some lanes may be inactive */ \
for (int o = 1; o < __spirv_SubgroupMaxSize(); o *= 2) { \
TYPE contribution = \
_Z30__spirv_SubgroupShuffleUpINTELI##TYPE_MANGLED##ET_S0_S0_j(x, x, \
o); \
TYPE contribution = __spirv_SubgroupShuffleUpINTEL(x, x, o); \
bool inactive = (sg_lid < o); \
contribution = (inactive) ? IDENTITY : contribution; \
x = OP(x, contribution); \
} \
/* For Reduce, broadcast result from highest active lane */ \
TYPE result; \
if (op == Reduce) { \
result = _Z28__spirv_SubgroupShuffleINTELI##TYPE_MANGLED##ET_S0_j( \
x, __spirv_SubgroupSize() - 1); \
result = __spirv_SubgroupShuffleINTEL(x, __spirv_SubgroupSize() - 1); \
*carry = result; \
} /* For InclusiveScan, use results as computed */ \
else if (op == InclusiveScan) { \
@@ -95,101 +71,100 @@ __CLC_DECLARE_SHUFFLES(double, d);
} /* For ExclusiveScan, shift and prepend identity */ \
else if (op == ExclusiveScan) { \
*carry = x; \
result = _Z30__spirv_SubgroupShuffleUpINTELI##TYPE_MANGLED##ET_S0_S0_j( \
x, x, 1); \
result = __spirv_SubgroupShuffleUpINTEL(x, x, 1); \
if (sg_lid == 0) { \
result = IDENTITY; \
} \
} \
return result;

#define __CLC_SUBGROUP_COLLECTIVE(NAME, OP, TYPE, TYPE_MANGLED, IDENTITY) \
#define __CLC_SUBGROUP_COLLECTIVE(NAME, OP, TYPE, IDENTITY) \
_CLC_DEF _CLC_OVERLOAD _CLC_CONVERGENT TYPE __CLC_APPEND( \
__clc__Subgroup, NAME)(uint op, TYPE x, TYPE * carry) { \
__CLC_SUBGROUP_COLLECTIVE_BODY(OP, TYPE, TYPE_MANGLED, IDENTITY) \
__CLC_SUBGROUP_COLLECTIVE_BODY(OP, TYPE, IDENTITY) \
}

__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, char, a, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, uchar, h, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, short, s, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, ushort, t, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, int, i, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, uint, j, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, long, l, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, ulong, m, 0)
__CLC_SUBGROUP_COLLECTIVE(FAdd, __CLC_ADD, half, DF16_, 0)
__CLC_SUBGROUP_COLLECTIVE(FAdd, __CLC_ADD, float, f, 0)
__CLC_SUBGROUP_COLLECTIVE(FAdd, __CLC_ADD, double, d, 0)

__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, char, a, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, uchar, h, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, short, s, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, ushort, t, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, int, i, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, uint, j, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, long, l, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, ulong, m, 1)
__CLC_SUBGROUP_COLLECTIVE(FMulKHR, __CLC_MUL, half, DF16_, 1)
__CLC_SUBGROUP_COLLECTIVE(FMulKHR, __CLC_MUL, float, f, 1)
__CLC_SUBGROUP_COLLECTIVE(FMulKHR, __CLC_MUL, double, d, 1)

__CLC_SUBGROUP_COLLECTIVE(SMin, __CLC_MIN, char, a, CHAR_MAX)
__CLC_SUBGROUP_COLLECTIVE(UMin, __CLC_MIN, uchar, h, UCHAR_MAX)
__CLC_SUBGROUP_COLLECTIVE(SMin, __CLC_MIN, short, s, SHRT_MAX)
__CLC_SUBGROUP_COLLECTIVE(UMin, __CLC_MIN, ushort, t, USHRT_MAX)
__CLC_SUBGROUP_COLLECTIVE(SMin, __CLC_MIN, int, i, INT_MAX)
__CLC_SUBGROUP_COLLECTIVE(UMin, __CLC_MIN, uint, j, UINT_MAX)
__CLC_SUBGROUP_COLLECTIVE(SMin, __CLC_MIN, long, l, LONG_MAX)
__CLC_SUBGROUP_COLLECTIVE(UMin, __CLC_MIN, ulong, m, ULONG_MAX)
__CLC_SUBGROUP_COLLECTIVE(FMin, __CLC_MIN, half, DF16_, INFINITY)
__CLC_SUBGROUP_COLLECTIVE(FMin, __CLC_MIN, float, f, INFINITY)
__CLC_SUBGROUP_COLLECTIVE(FMin, __CLC_MIN, double, d, INFINITY)

__CLC_SUBGROUP_COLLECTIVE(SMax, __CLC_MAX, char, a, CHAR_MIN)
__CLC_SUBGROUP_COLLECTIVE(UMax, __CLC_MAX, uchar, h, 0)
__CLC_SUBGROUP_COLLECTIVE(SMax, __CLC_MAX, short, s, SHRT_MIN)
__CLC_SUBGROUP_COLLECTIVE(UMax, __CLC_MAX, ushort, t, 0)
__CLC_SUBGROUP_COLLECTIVE(SMax, __CLC_MAX, int, i, INT_MIN)
__CLC_SUBGROUP_COLLECTIVE(UMax, __CLC_MAX, uint, j, 0)
__CLC_SUBGROUP_COLLECTIVE(SMax, __CLC_MAX, long, l, LONG_MIN)
__CLC_SUBGROUP_COLLECTIVE(UMax, __CLC_MAX, ulong, m, 0)
__CLC_SUBGROUP_COLLECTIVE(FMax, __CLC_MAX, half, DF16_, -INFINITY)
__CLC_SUBGROUP_COLLECTIVE(FMax, __CLC_MAX, float, f, -INFINITY)
__CLC_SUBGROUP_COLLECTIVE(FMax, __CLC_MAX, double, d, -INFINITY)

__CLC_SUBGROUP_COLLECTIVE(All, __CLC_AND, bool, a, true)
__CLC_SUBGROUP_COLLECTIVE(Any, __CLC_OR, bool, a, false)

__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, uchar, h, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, uchar, h, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, uchar, h, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, char, a, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, char, a, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, char, a, 0)

__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, ushort, t, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, ushort, t, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, ushort, t, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, short, s, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, short, s, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, short, s, 0)

__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, uint, j, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, uint, j, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, uint, j, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, int, i, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, int, i, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, int, i, 0)

__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, ulong, m, ~0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, ulong, m, 0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, ulong, m, 0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, long, l, ~0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, long, l, 0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, long, l, 0l)

__CLC_SUBGROUP_COLLECTIVE(LogicalOrKHR, __CLC_LOGICAL_OR, bool, a, false)
__CLC_SUBGROUP_COLLECTIVE(LogicalAndKHR, __CLC_LOGICAL_AND, bool, a, true)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, char, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, uchar, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, short, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, ushort, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, int, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, uint, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, long, 0)
__CLC_SUBGROUP_COLLECTIVE(IAdd, __CLC_ADD, ulong, 0)
__CLC_SUBGROUP_COLLECTIVE(FAdd, __CLC_ADD, half, 0)
__CLC_SUBGROUP_COLLECTIVE(FAdd, __CLC_ADD, float, 0)
__CLC_SUBGROUP_COLLECTIVE(FAdd, __CLC_ADD, double, 0)

__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, char, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, uchar, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, short, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, ushort, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, int, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, uint, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, long, 1)
__CLC_SUBGROUP_COLLECTIVE(IMulKHR, __CLC_MUL, ulong, 1)
__CLC_SUBGROUP_COLLECTIVE(FMulKHR, __CLC_MUL, half, 1)
__CLC_SUBGROUP_COLLECTIVE(FMulKHR, __CLC_MUL, float, 1)
__CLC_SUBGROUP_COLLECTIVE(FMulKHR, __CLC_MUL, double, 1)

__CLC_SUBGROUP_COLLECTIVE(SMin, __CLC_MIN, char, CHAR_MAX)
__CLC_SUBGROUP_COLLECTIVE(UMin, __CLC_MIN, uchar, UCHAR_MAX)
__CLC_SUBGROUP_COLLECTIVE(SMin, __CLC_MIN, short, SHRT_MAX)
__CLC_SUBGROUP_COLLECTIVE(UMin, __CLC_MIN, ushort, USHRT_MAX)
__CLC_SUBGROUP_COLLECTIVE(SMin, __CLC_MIN, int, INT_MAX)
__CLC_SUBGROUP_COLLECTIVE(UMin, __CLC_MIN, uint, UINT_MAX)
__CLC_SUBGROUP_COLLECTIVE(SMin, __CLC_MIN, long, LONG_MAX)
__CLC_SUBGROUP_COLLECTIVE(UMin, __CLC_MIN, ulong, ULONG_MAX)
__CLC_SUBGROUP_COLLECTIVE(FMin, __CLC_MIN, half, INFINITY)
__CLC_SUBGROUP_COLLECTIVE(FMin, __CLC_MIN, float, INFINITY)
__CLC_SUBGROUP_COLLECTIVE(FMin, __CLC_MIN, double, INFINITY)

__CLC_SUBGROUP_COLLECTIVE(SMax, __CLC_MAX, char, CHAR_MIN)
__CLC_SUBGROUP_COLLECTIVE(UMax, __CLC_MAX, uchar, 0)
__CLC_SUBGROUP_COLLECTIVE(SMax, __CLC_MAX, short, SHRT_MIN)
__CLC_SUBGROUP_COLLECTIVE(UMax, __CLC_MAX, ushort, 0)
__CLC_SUBGROUP_COLLECTIVE(SMax, __CLC_MAX, int, INT_MIN)
__CLC_SUBGROUP_COLLECTIVE(UMax, __CLC_MAX, uint, 0)
__CLC_SUBGROUP_COLLECTIVE(SMax, __CLC_MAX, long, LONG_MIN)
__CLC_SUBGROUP_COLLECTIVE(UMax, __CLC_MAX, ulong, 0)
__CLC_SUBGROUP_COLLECTIVE(FMax, __CLC_MAX, half, -INFINITY)
__CLC_SUBGROUP_COLLECTIVE(FMax, __CLC_MAX, float, -INFINITY)
__CLC_SUBGROUP_COLLECTIVE(FMax, __CLC_MAX, double, -INFINITY)

__CLC_SUBGROUP_COLLECTIVE(All, __CLC_AND, bool, true)
__CLC_SUBGROUP_COLLECTIVE(Any, __CLC_OR, bool, false)

__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, uchar, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, uchar, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, uchar, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, char, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, char, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, char, 0)

__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, ushort, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, ushort, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, ushort, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, short, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, short, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, short, 0)

__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, uint, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, uint, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, uint, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, int, ~0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, int, 0)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, int, 0)

__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, ulong, ~0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, ulong, 0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, ulong, 0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseAndKHR, __CLC_AND, long, ~0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseOrKHR, __CLC_OR, long, 0l)
__CLC_SUBGROUP_COLLECTIVE(BitwiseXorKHR, __CLC_XOR, long, 0l)

__CLC_SUBGROUP_COLLECTIVE(LogicalOrKHR, __CLC_LOGICAL_OR, bool, false)
__CLC_SUBGROUP_COLLECTIVE(LogicalAndKHR, __CLC_LOGICAL_AND, bool, true)

#undef __CLC_SUBGROUP_COLLECTIVE_BODY
#undef __CLC_SUBGROUP_COLLECTIVE
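Dropping the TYPE_MANGLED parameter works because the calls inside __CLC_SUBGROUP_COLLECTIVE_BODY now resolve through the overloaded __spirv_SubgroupShuffleINTEL and __spirv_SubgroupShuffleUpINTEL declarations instead of per-type mangled symbols. The shuffle-up loop itself is a Hillis-Steele-style scan that masks off lanes with no lower neighbour. A self-contained OpenCL C sketch of that pattern, specialized to an int add (the helper and variable names are hypothetical; the identity 0 matches the IAdd instantiations above):

int subgroup_inclusive_add(int x) {
  uint sg_lid = get_sub_group_local_id();
  uint sg_max = get_max_sub_group_size();
  // Can't use XOR/butterfly shuffles; some lanes may be inactive.
  for (uint o = 1; o < sg_max; o *= 2) {
    // Pull the partial sum held by the lane `o` positions below this one.
    int contribution = __spirv_SubgroupShuffleUpINTEL(x, x, o);
    // Lanes with no such neighbour contribute the identity instead.
    x += (sg_lid < o) ? 0 : contribution;
  }
  return x; // lane i now holds the sum of lanes 0..i (an inclusive scan)
}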
@@ -380,10 +355,9 @@ long __clc__3d_to_linear_local_id(ulong3 id) {

#define __CLC_GROUP_BROADCAST(TYPE, TYPE_MANGLED) \
_CLC_DEF _CLC_OVERLOAD _CLC_CONVERGENT TYPE __spirv_GroupBroadcast( \
int scope, TYPE x, ulong local_id) { \
if (scope == Subgroup) { \
return _Z28__spirv_SubgroupShuffleINTELI##TYPE_MANGLED##ET_S0_j( \
x, local_id); \
return __spirv_SubgroupShuffleINTEL(x, local_id); \
} \
bool source = (__spirv_LocalInvocationIndex() == local_id); \
__local TYPE *scratch = __CLC_APPEND(__clc__get_group_scratch_, TYPE)(); \
Expand All @@ -396,17 +370,17 @@ long __clc__3d_to_linear_local_id(ulong3 id) {
return result; \
} \
_CLC_DEF _CLC_OVERLOAD _CLC_CONVERGENT TYPE __spirv_GroupBroadcast( \
int scope, TYPE x, ulong2 local_id) { \
ulong linear_local_id = __clc__2d_to_linear_local_id(local_id); \
return __spirv_GroupBroadcast(scope, x, linear_local_id); \
} \
_CLC_DEF _CLC_OVERLOAD _CLC_CONVERGENT TYPE __spirv_GroupBroadcast( \
int scope, TYPE x, ulong3 local_id) { \
ulong linear_local_id = __clc__3d_to_linear_local_id(local_id); \
return __spirv_GroupBroadcast(scope, x, linear_local_id); \
} \
_CLC_DEF _CLC_OVERLOAD _CLC_CONVERGENT TYPE __spirv_GroupBroadcast( \
int scope, TYPE x, uint local_id) { \
return __spirv_GroupBroadcast(scope, x, (ulong)local_id); \
}
__CLC_GROUP_BROADCAST(char, a);
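The GroupBroadcast path benefits in the same way: at subgroup scope it now forwards to the overloaded __spirv_SubgroupShuffleINTEL rather than a mangled per-type symbol, with the ulong local_id narrowing to the builtin's uint index just as the previous j-suffixed (unsigned int) parameter did. A small OpenCL C sketch of a caller, assuming it sees the same overload declarations as this file (Subgroup is the SPIR-V scope enumerator already used above; the function name is hypothetical):

float broadcast_lane0_demo(float mine) {
  // Every invocation in the subgroup receives lane 0's value; at Subgroup
  // scope the definition above resolves this to
  // __spirv_SubgroupShuffleINTEL(mine, 0).
  return __spirv_GroupBroadcast(Subgroup, mine, (ulong)0);
}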