Skip to content

Commit b464aec

Browse files
committed
extra scalar vector overloads for clamp
1 parent db70d76 commit b464aec

File tree

3 files changed

+96
-37
lines changed

3 files changed

+96
-37
lines changed

clang/lib/Headers/hlsl/hlsl_alias_intrinsics.h

Lines changed: 63 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -35,26 +35,44 @@ namespace hlsl {
3535
#define _HLSL_16BIT_AVAILABILITY_STAGE(environment, version, stage)
3636
#endif
3737

38-
#define GEN_VEC_SCALAR_OVERLOADS(FUNC_NAME, BASE_TYPE, AVAIL) \
39-
GEN_BOTH_OVERLOADS(FUNC_NAME, BASE_TYPE, BASE_TYPE##2, AVAIL) \
40-
GEN_BOTH_OVERLOADS(FUNC_NAME, BASE_TYPE, BASE_TYPE##3, AVAIL) \
41-
GEN_BOTH_OVERLOADS(FUNC_NAME, BASE_TYPE, BASE_TYPE##4, AVAIL)
42-
43-
#define GEN_BOTH_OVERLOADS(FUNC_NAME, BASE_TYPE, VECTOR_TYPE, AVAIL) \
44-
IF_TRUE_##AVAIL( \
45-
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)) constexpr VECTOR_TYPE \
46-
FUNC_NAME(VECTOR_TYPE p0, BASE_TYPE p1) { \
47-
return __builtin_elementwise_##FUNC_NAME(p0, (VECTOR_TYPE)p1); \
38+
#define _HLSL_CAT(a,b) a##b
39+
#define _HLSL_VEC_SCALAR_OVERLOADS(NAME, BASE_T, AVAIL) \
40+
_HLSL_ALL_OVERLOADS(NAME, BASE_T, AVAIL, _HLSL_CAT(_HLSL_NUM_ARGS_,NAME))
41+
42+
#define _HLSL_ALL_OVERLOADS(NAME, BASE_T, AVAIL, NUM_ARGS) \
43+
_HLSL_CAT(_HLSL_BOTH_OVERLOADS_,NUM_ARGS)(NAME, BASE_T, _HLSL_CAT(BASE_T,2), AVAIL) \
44+
_HLSL_CAT(_HLSL_BOTH_OVERLOADS_,NUM_ARGS)(NAME, BASE_T, _HLSL_CAT(BASE_T,3), AVAIL) \
45+
_HLSL_CAT(_HLSL_BOTH_OVERLOADS_,NUM_ARGS)(NAME, BASE_T, _HLSL_CAT(BASE_T,4), AVAIL)
46+
47+
#define _HLSL_BOTH_OVERLOADS_2(NAME, BASE_T, VECTOR_T, AVAIL) \
48+
_HLSL_CAT(_HLSL_IF_TRUE_,AVAIL)( \
49+
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)) constexpr VECTOR_T \
50+
NAME(VECTOR_T p0, BASE_T p1) { \
51+
return _HLSL_CAT(__builtin_elementwise_,NAME)(p0, (VECTOR_T)p1); \
4852
} \
49-
IF_TRUE_##AVAIL( \
50-
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)) constexpr VECTOR_TYPE \
51-
FUNC_NAME(BASE_TYPE p0, VECTOR_TYPE p1) { \
52-
return __builtin_elementwise_##FUNC_NAME((VECTOR_TYPE)p0, p1); \
53+
_HLSL_CAT(_HLSL_IF_TRUE_,AVAIL)( \
54+
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)) constexpr VECTOR_T \
55+
NAME(BASE_T p0, VECTOR_T p1) { \
56+
return _HLSL_CAT(__builtin_elementwise_,NAME)((VECTOR_T)p0, p1); \
5357
}
5458

55-
#define IF_TRUE_0(EXPR)
56-
#define IF_TRUE_1(EXPR) EXPR
59+
#define _HLSL_BOTH_OVERLOADS_3(NAME, BASE_T, VECTOR_T, AVAIL) \
60+
_HLSL_CAT(_HLSL_IF_TRUE_,AVAIL)(_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)) \
61+
constexpr VECTOR_T NAME(VECTOR_T p0, VECTOR_T p1, BASE_T p2) { \
62+
return _HLSL_CAT(__builtin_hlsl_elementwise_,NAME)(p0, p1, (VECTOR_T)p2); \
63+
} \
64+
_HLSL_CAT(_HLSL_IF_TRUE_,AVAIL)(_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)) \
65+
constexpr VECTOR_T NAME(VECTOR_T p0, BASE_T p1, VECTOR_T p2) { \
66+
return _HLSL_CAT(__builtin_hlsl_elementwise_,NAME)(p0, (VECTOR_T)p1, p2); \
67+
}
68+
69+
#define _HLSL_IF_TRUE_0(EXPR)
70+
#define _HLSL_IF_TRUE_1(EXPR) EXPR
5771

72+
#define _HLSL_NUM_ARGS_min 2
73+
#define _HLSL_NUM_ARGS_max 2
74+
#define _HLSL_NUM_ARGS_clamp 3
75+
5876
//===----------------------------------------------------------------------===//
5977
// abs builtins
6078
//===----------------------------------------------------------------------===//
@@ -582,7 +600,8 @@ half3 clamp(half3, half3, half3);
582600
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
583601
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
584602
half4 clamp(half4, half4, half4);
585-
603+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, half, 1)
604+
586605
#ifdef __HLSL_ENABLE_16_BIT
587606
_HLSL_AVAILABILITY(shadermodel, 6.2)
588607
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
@@ -596,7 +615,8 @@ int16_t3 clamp(int16_t3, int16_t3, int16_t3);
596615
_HLSL_AVAILABILITY(shadermodel, 6.2)
597616
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
598617
int16_t4 clamp(int16_t4, int16_t4, int16_t4);
599-
618+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, int16_t, 1)
619+
600620
_HLSL_AVAILABILITY(shadermodel, 6.2)
601621
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
602622
uint16_t clamp(uint16_t, uint16_t, uint16_t);
@@ -609,6 +629,7 @@ uint16_t3 clamp(uint16_t3, uint16_t3, uint16_t3);
609629
_HLSL_AVAILABILITY(shadermodel, 6.2)
610630
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
611631
uint16_t4 clamp(uint16_t4, uint16_t4, uint16_t4);
632+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, uint16_t, 1)
612633
#endif
613634

614635
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
@@ -619,6 +640,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
619640
int3 clamp(int3, int3, int3);
620641
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
621642
int4 clamp(int4, int4, int4);
643+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, int, 0)
622644

623645
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
624646
uint clamp(uint, uint, uint);
@@ -628,6 +650,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
628650
uint3 clamp(uint3, uint3, uint3);
629651
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
630652
uint4 clamp(uint4, uint4, uint4);
653+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, uint, 0)
631654

632655
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
633656
int64_t clamp(int64_t, int64_t, int64_t);
@@ -637,6 +660,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
637660
int64_t3 clamp(int64_t3, int64_t3, int64_t3);
638661
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
639662
int64_t4 clamp(int64_t4, int64_t4, int64_t4);
663+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, int64_t, 0)
640664

641665
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
642666
uint64_t clamp(uint64_t, uint64_t, uint64_t);
@@ -646,6 +670,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
646670
uint64_t3 clamp(uint64_t3, uint64_t3, uint64_t3);
647671
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
648672
uint64_t4 clamp(uint64_t4, uint64_t4, uint64_t4);
673+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, uint64_t, 0)
649674

650675
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
651676
float clamp(float, float, float);
@@ -655,6 +680,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
655680
float3 clamp(float3, float3, float3);
656681
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
657682
float4 clamp(float4, float4, float4);
683+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, float, 0)
658684

659685
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
660686
double clamp(double, double, double);
@@ -664,6 +690,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
664690
double3 clamp(double3, double3, double3);
665691
_HLSL_BUILTIN_ALIAS(__builtin_hlsl_elementwise_clamp)
666692
double4 clamp(double4, double4, double4);
693+
_HLSL_VEC_SCALAR_OVERLOADS(clamp, double, 0)
667694

668695
//===----------------------------------------------------------------------===//
669696
// clip builtins
@@ -1576,7 +1603,7 @@ half3 max(half3, half3);
15761603
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
15771604
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
15781605
half4 max(half4, half4);
1579-
GEN_VEC_SCALAR_OVERLOADS(max, half, 1)
1606+
_HLSL_VEC_SCALAR_OVERLOADS(max, half, 1)
15801607

15811608
#ifdef __HLSL_ENABLE_16_BIT
15821609
_HLSL_AVAILABILITY(shadermodel, 6.2)
@@ -1591,7 +1618,7 @@ int16_t3 max(int16_t3, int16_t3);
15911618
_HLSL_AVAILABILITY(shadermodel, 6.2)
15921619
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
15931620
int16_t4 max(int16_t4, int16_t4);
1594-
GEN_VEC_SCALAR_OVERLOADS(max, int16_t, 1)
1621+
_HLSL_VEC_SCALAR_OVERLOADS(max, int16_t, 1)
15951622

15961623
_HLSL_AVAILABILITY(shadermodel, 6.2)
15971624
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
@@ -1605,7 +1632,7 @@ uint16_t3 max(uint16_t3, uint16_t3);
16051632
_HLSL_AVAILABILITY(shadermodel, 6.2)
16061633
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16071634
uint16_t4 max(uint16_t4, uint16_t4);
1608-
GEN_VEC_SCALAR_OVERLOADS(max, uint16_t, 1)
1635+
_HLSL_VEC_SCALAR_OVERLOADS(max, uint16_t, 1)
16091636
#endif
16101637

16111638
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
@@ -1616,7 +1643,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16161643
int3 max(int3, int3);
16171644
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16181645
int4 max(int4, int4);
1619-
GEN_VEC_SCALAR_OVERLOADS(max, int, 0)
1646+
_HLSL_VEC_SCALAR_OVERLOADS(max, int, 0)
16201647

16211648
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16221649
uint max(uint, uint);
@@ -1626,7 +1653,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16261653
uint3 max(uint3, uint3);
16271654
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16281655
uint4 max(uint4, uint4);
1629-
GEN_VEC_SCALAR_OVERLOADS(max, uint, 0)
1656+
_HLSL_VEC_SCALAR_OVERLOADS(max, uint, 0)
16301657

16311658
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16321659
int64_t max(int64_t, int64_t);
@@ -1636,7 +1663,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16361663
int64_t3 max(int64_t3, int64_t3);
16371664
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16381665
int64_t4 max(int64_t4, int64_t4);
1639-
GEN_VEC_SCALAR_OVERLOADS(max, int64_t, 0)
1666+
_HLSL_VEC_SCALAR_OVERLOADS(max, int64_t, 0)
16401667

16411668
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16421669
uint64_t max(uint64_t, uint64_t);
@@ -1646,7 +1673,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16461673
uint64_t3 max(uint64_t3, uint64_t3);
16471674
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16481675
uint64_t4 max(uint64_t4, uint64_t4);
1649-
GEN_VEC_SCALAR_OVERLOADS(max, uint64_t, 0)
1676+
_HLSL_VEC_SCALAR_OVERLOADS(max, uint64_t, 0)
16501677

16511678
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16521679
float max(float, float);
@@ -1656,7 +1683,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16561683
float3 max(float3, float3);
16571684
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16581685
float4 max(float4, float4);
1659-
GEN_VEC_SCALAR_OVERLOADS(max, float, 0)
1686+
_HLSL_VEC_SCALAR_OVERLOADS(max, float, 0)
16601687

16611688
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16621689
double max(double, double);
@@ -1666,7 +1693,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16661693
double3 max(double3, double3);
16671694
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
16681695
double4 max(double4, double4);
1669-
GEN_VEC_SCALAR_OVERLOADS(max, double, 0)
1696+
_HLSL_VEC_SCALAR_OVERLOADS(max, double, 0)
16701697

16711698
//===----------------------------------------------------------------------===//
16721699
// min builtins
@@ -1689,7 +1716,7 @@ half3 min(half3, half3);
16891716
_HLSL_16BIT_AVAILABILITY(shadermodel, 6.2)
16901717
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
16911718
half4 min(half4, half4);
1692-
GEN_VEC_SCALAR_OVERLOADS(min, half, 1)
1719+
_HLSL_VEC_SCALAR_OVERLOADS(min, half, 1)
16931720

16941721
#ifdef __HLSL_ENABLE_16_BIT
16951722
_HLSL_AVAILABILITY(shadermodel, 6.2)
@@ -1704,7 +1731,7 @@ int16_t3 min(int16_t3, int16_t3);
17041731
_HLSL_AVAILABILITY(shadermodel, 6.2)
17051732
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17061733
int16_t4 min(int16_t4, int16_t4);
1707-
GEN_VEC_SCALAR_OVERLOADS(min, int16_t, 1)
1734+
_HLSL_VEC_SCALAR_OVERLOADS(min, int16_t, 1)
17081735

17091736
_HLSL_AVAILABILITY(shadermodel, 6.2)
17101737
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
@@ -1718,7 +1745,7 @@ uint16_t3 min(uint16_t3, uint16_t3);
17181745
_HLSL_AVAILABILITY(shadermodel, 6.2)
17191746
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17201747
uint16_t4 min(uint16_t4, uint16_t4);
1721-
GEN_VEC_SCALAR_OVERLOADS(min, uint16_t, 1)
1748+
_HLSL_VEC_SCALAR_OVERLOADS(min, uint16_t, 1)
17221749
#endif
17231750

17241751
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
@@ -1729,7 +1756,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17291756
int3 min(int3, int3);
17301757
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17311758
int4 min(int4, int4);
1732-
GEN_VEC_SCALAR_OVERLOADS(min, int, 0)
1759+
_HLSL_VEC_SCALAR_OVERLOADS(min, int, 0)
17331760

17341761
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17351762
uint min(uint, uint);
@@ -1739,7 +1766,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17391766
uint3 min(uint3, uint3);
17401767
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17411768
uint4 min(uint4, uint4);
1742-
GEN_VEC_SCALAR_OVERLOADS(min, uint, 0)
1769+
_HLSL_VEC_SCALAR_OVERLOADS(min, uint, 0)
17431770

17441771
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17451772
float min(float, float);
@@ -1749,7 +1776,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17491776
float3 min(float3, float3);
17501777
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17511778
float4 min(float4, float4);
1752-
GEN_VEC_SCALAR_OVERLOADS(min, float, 0)
1779+
_HLSL_VEC_SCALAR_OVERLOADS(min, float, 0)
17531780

17541781
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17551782
int64_t min(int64_t, int64_t);
@@ -1759,7 +1786,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17591786
int64_t3 min(int64_t3, int64_t3);
17601787
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17611788
int64_t4 min(int64_t4, int64_t4);
1762-
GEN_VEC_SCALAR_OVERLOADS(min, int64_t, 0)
1789+
_HLSL_VEC_SCALAR_OVERLOADS(min, int64_t, 0)
17631790

17641791
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17651792
uint64_t min(uint64_t, uint64_t);
@@ -1769,7 +1796,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17691796
uint64_t3 min(uint64_t3, uint64_t3);
17701797
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17711798
uint64_t4 min(uint64_t4, uint64_t4);
1772-
GEN_VEC_SCALAR_OVERLOADS(min, uint64_t, 0)
1799+
_HLSL_VEC_SCALAR_OVERLOADS(min, uint64_t, 0)
17731800

17741801
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17751802
double min(double, double);
@@ -1779,7 +1806,7 @@ _HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17791806
double3 min(double3, double3);
17801807
_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
17811808
double4 min(double4, double4);
1782-
GEN_VEC_SCALAR_OVERLOADS(min, double, 0)
1809+
_HLSL_VEC_SCALAR_OVERLOADS(min, double, 0)
17831810

17841811
//===----------------------------------------------------------------------===//
17851812
// normalize builtins

clang/test/CodeGenHLSL/builtins/clamp.hlsl

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,9 @@ int16_t3 test_clamp_short3(int16_t3 p0, int16_t3 p1) { return clamp(p0, p1,p1);
2828
// NATIVE_HALF: define [[FNATTRS]] <4 x i16> @_Z17test_clamp_short4
2929
// NATIVE_HALF: call <4 x i16> @llvm.[[TARGET]].sclamp.v4i16
3030
int16_t4 test_clamp_short4(int16_t4 p0, int16_t4 p1) { return clamp(p0, p1,p1); }
31+
// NATIVE_HALF: define [[FNATTRS]] <4 x i16> {{.*}}test_clamp_short4_mismatch
32+
// NATIVE_HALF: call <4 x i16> @llvm.[[TARGET]].sclamp.v4i16
33+
int16_t4 test_clamp_short4_mismatch(int16_t4 p0, int16_t p1) { return clamp(p0, p0,p1); }
3134

3235
// NATIVE_HALF: define [[FNATTRS]] i16 @_Z17test_clamp_ushort
3336
// NATIVE_HALF: call i16 @llvm.[[TARGET]].uclamp.i16(
@@ -41,6 +44,9 @@ uint16_t3 test_clamp_ushort3(uint16_t3 p0, uint16_t3 p1) { return clamp(p0, p1,p
4144
// NATIVE_HALF: define [[FNATTRS]] <4 x i16> @_Z18test_clamp_ushort4
4245
// NATIVE_HALF: call <4 x i16> @llvm.[[TARGET]].uclamp.v4i16
4346
uint16_t4 test_clamp_ushort4(uint16_t4 p0, uint16_t4 p1) { return clamp(p0, p1,p1); }
47+
// NATIVE_HALF: define [[FNATTRS]] <4 x i16> {{.*}}test_clamp_ushort4_mismatch
48+
// NATIVE_HALF: call <4 x i16> @llvm.[[TARGET]].uclamp.v4i16
49+
uint16_t4 test_clamp_ushort4_mismatch(uint16_t4 p0, uint16_t p1) { return clamp(p0, p0,p1); }
4450
#endif
4551

4652
// CHECK: define [[FNATTRS]] i32 @_Z14test_clamp_int
@@ -55,6 +61,9 @@ int3 test_clamp_int3(int3 p0, int3 p1) { return clamp(p0, p1,p1); }
5561
// CHECK: define [[FNATTRS]] <4 x i32> @_Z15test_clamp_int4
5662
// CHECK: call <4 x i32> @llvm.[[TARGET]].sclamp.v4i32
5763
int4 test_clamp_int4(int4 p0, int4 p1) { return clamp(p0, p1,p1); }
64+
// CHECK: define [[FNATTRS]] <4 x i32> {{.*}}test_clamp_int4_mismatch
65+
// CHECK: call <4 x i32> @llvm.[[TARGET]].sclamp.v4i32
66+
int4 test_clamp_int4_mismatch(int4 p0, int p1) { return clamp(p0, p0,p1); }
5867

5968
// CHECK: define [[FNATTRS]] i32 @_Z15test_clamp_uint
6069
// CHECK: call i32 @llvm.[[TARGET]].uclamp.i32(
@@ -68,6 +77,9 @@ uint3 test_clamp_uint3(uint3 p0, uint3 p1) { return clamp(p0, p1,p1); }
6877
// CHECK: define [[FNATTRS]] <4 x i32> @_Z16test_clamp_uint4
6978
// CHECK: call <4 x i32> @llvm.[[TARGET]].uclamp.v4i32
7079
uint4 test_clamp_uint4(uint4 p0, uint4 p1) { return clamp(p0, p1,p1); }
80+
// CHECK: define [[FNATTRS]] <4 x i32> {{.*}}test_clamp_uint4_mismatch
81+
// CHECK: call <4 x i32> @llvm.[[TARGET]].uclamp.v4i32
82+
uint4 test_clamp_uint4_mismatch(uint4 p0, uint p1) { return clamp(p0, p0,p1); }
7183

7284
// CHECK: define [[FNATTRS]] i64 @_Z15test_clamp_long
7385
// CHECK: call i64 @llvm.[[TARGET]].sclamp.i64(
@@ -81,6 +93,9 @@ int64_t3 test_clamp_long3(int64_t3 p0, int64_t3 p1) { return clamp(p0, p1,p1); }
8193
// CHECK: define [[FNATTRS]] <4 x i64> @_Z16test_clamp_long4
8294
// CHECK: call <4 x i64> @llvm.[[TARGET]].sclamp.v4i64
8395
int64_t4 test_clamp_long4(int64_t4 p0, int64_t4 p1) { return clamp(p0, p1,p1); }
96+
// CHECK: define [[FNATTRS]] <4 x i64> {{.*}}test_clamp_long4_mismatch
97+
// CHECK: call <4 x i64> @llvm.[[TARGET]].sclamp.v4i64
98+
int64_t4 test_clamp_long4_mismatch(int64_t4 p0, int64_t p1) { return clamp(p0, p0,p1); }
8499

85100
// CHECK: define [[FNATTRS]] i64 @_Z16test_clamp_ulong
86101
// CHECK: call i64 @llvm.[[TARGET]].uclamp.i64(
@@ -94,6 +109,9 @@ uint64_t3 test_clamp_ulong3(uint64_t3 p0, uint64_t3 p1) { return clamp(p0, p1,p1
94109
// CHECK: define [[FNATTRS]] <4 x i64> @_Z17test_clamp_ulong4
95110
// CHECK: call <4 x i64> @llvm.[[TARGET]].uclamp.v4i64
96111
uint64_t4 test_clamp_ulong4(uint64_t4 p0, uint64_t4 p1) { return clamp(p0, p1,p1); }
112+
// CHECK: define [[FNATTRS]] <4 x i64> {{.*}}test_clamp_ulong4_mismatch
113+
// CHECK: call <4 x i64> @llvm.[[TARGET]].uclamp.v4i64
114+
uint64_t4 test_clamp_ulong4_mismatch(uint64_t4 p0, uint64_t p1) { return clamp(p0, p0,p1); }
97115

98116
// NATIVE_HALF: define [[FNATTRS]] [[FFNATTRS]] half @_Z15test_clamp_half
99117
// NATIVE_HALF: call reassoc nnan ninf nsz arcp afn half @llvm.[[TARGET]].nclamp.f16(
@@ -115,6 +133,11 @@ half3 test_clamp_half3(half3 p0, half3 p1) { return clamp(p0, p1,p1); }
115133
// NO_HALF: define [[FNATTRS]] [[FFNATTRS]] <4 x float> @_Z16test_clamp_half4
116134
// NO_HALF: call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].nclamp.v4f32(
117135
half4 test_clamp_half4(half4 p0, half4 p1) { return clamp(p0, p1,p1); }
136+
// NATIVE_HALF: define [[FNATTRS]] [[FFNATTRS]] <4 x half> {{.*}}test_clamp_half4_mismatch
137+
// NATIVE_HALF: call reassoc nnan ninf nsz arcp afn <4 x half> @llvm.[[TARGET]].nclamp.v4f16
138+
// NO_HALF: define [[FNATTRS]] [[FFNATTRS]] <4 x float> {{.*}}test_clamp_half4_mismatch
139+
// NO_HALF: call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].nclamp.v4f32(
140+
half4 test_clamp_half4_mismatch(half4 p0, half p1) { return clamp(p0, p0,p1); }
118141

119142
// CHECK: define [[FNATTRS]] [[FFNATTRS]] float @_Z16test_clamp_float
120143
// CHECK: call reassoc nnan ninf nsz arcp afn float @llvm.[[TARGET]].nclamp.f32(
@@ -128,6 +151,9 @@ float3 test_clamp_float3(float3 p0, float3 p1) { return clamp(p0, p1,p1); }
128151
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x float> @_Z17test_clamp_float4
129152
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].nclamp.v4f32
130153
float4 test_clamp_float4(float4 p0, float4 p1) { return clamp(p0, p1,p1); }
154+
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x float> {{.*}}test_clamp_float4_mismatch
155+
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].nclamp.v4f32
156+
float4 test_clamp_float4_mismatch(float4 p0, float p1) { return clamp(p0, p0,p1); }
131157

132158
// CHECK: define [[FNATTRS]] [[FFNATTRS]] double @_Z17test_clamp_double
133159
// CHECK: call reassoc nnan ninf nsz arcp afn double @llvm.[[TARGET]].nclamp.f64(
@@ -141,3 +167,9 @@ double3 test_clamp_double3(double3 p0, double3 p1) { return clamp(p0, p1,p1); }
141167
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x double> @_Z18test_clamp_double4
142168
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x double> @llvm.[[TARGET]].nclamp.v4f64
143169
double4 test_clamp_double4(double4 p0, double4 p1) { return clamp(p0, p1,p1); }
170+
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x double> {{.*}}test_clamp_double4_mismatch
171+
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x double> @llvm.[[TARGET]].nclamp.v4f64
172+
double4 test_clamp_double4_mismatch(double4 p0, double p1) { return clamp(p0, p0,p1); }
173+
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x double> {{.*}}test_clamp_double4_mismatch2
174+
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x double> @llvm.[[TARGET]].nclamp.v4f64
175+
double4 test_clamp_double4_mismatch2(double4 p0, double p1) { return clamp(p0, p1,p0); }

clang/test/SemaHLSL/BuiltIns/clamp-errors.hlsl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ float2 test_clamp_no_second_arg(float2 p0) {
2222

2323
float2 test_clamp_vector_size_mismatch(float3 p0, float2 p1) {
2424
return clamp(p0, p0, p1);
25-
// expected-warning@-1 {{implicit conversion truncates vector: 'float3' (aka 'vector<float, 3>') to 'vector<float, 2>' (vector of 2 'float' values)}}
25+
// expected-error@-1 {{call to 'clamp' is ambiguous}}
2626
}
2727

2828
float2 test_clamp_builtin_vector_size_mismatch(float3 p0, float2 p1) {

0 commit comments

Comments
 (0)