+ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
// RUN: -o - | FileCheck %s

#include "Inputs/cuda.h"

// CHECK-LABEL: @_Z16use_dispatch_ptrPi(
- // CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
- // CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[OUT:%.*]] = alloca i32*, align 8, addrspace(5)
+ // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[OUT]] to i32**
+ // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i32*, align 8, addrspace(5)
+ // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[OUT_ADDR]] to i32**
+ // CHECK-NEXT: [[DISPATCH_PTR:%.*]] = alloca i32*, align 8, addrspace(5)
+ // CHECK-NEXT: [[DISPATCH_PTR_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[DISPATCH_PTR]] to i32**
+ // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i32 addrspace(1)* [[OUT_COERCE:%.*]] to i32*
+ // CHECK-NEXT: store i32* [[TMP0]], i32** [[OUT_ASCAST]], align 8
+ // CHECK-NEXT: [[OUT1:%.*]] = load i32*, i32** [[OUT_ASCAST]], align 8
+ // CHECK-NEXT: store i32* [[OUT1]], i32** [[OUT_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP1:%.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+ // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(4)* [[TMP1]] to i32*
+ // CHECK-NEXT: store i32* [[TMP2]], i32** [[DISPATCH_PTR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DISPATCH_PTR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
+ // CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[OUT_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: store i32 [[TMP4]], i32* [[TMP5]], align 4
+ // CHECK-NEXT: ret void
+ //
__global__ void use_dispatch_ptr(int *out) {
  const int *dispatch_ptr = (const int *)__builtin_amdgcn_dispatch_ptr();
  *out = *dispatch_ptr;
}

- // CHECK-LABEL: @_Z12test_ds_fmaxf(
- // CHECK: call contract float @llvm.amdgcn.ds.fmax.f32(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
__global__
+ // CHECK-LABEL: @_Z12test_ds_fmaxf(
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+ // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float*
+ // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5)
+ // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float*
+ // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4
+ // CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4
+ // CHECK-NEXT: [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fmax.f32(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float [[TMP0]], i32 0, i32 0, i1 false)
+ // CHECK-NEXT: store volatile float [[TMP1]], float* [[X_ASCAST]], align 4
+ // CHECK-NEXT: ret void
+ //
void test_ds_fmax(float src) {
  __shared__ float shared;
  volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
}

// CHECK-LABEL: @_Z12test_ds_faddf(
- // CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+ // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float*
+ // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5)
+ // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float*
+ // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4
+ // CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4
+ // CHECK-NEXT: [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float [[TMP0]], i32 0, i32 0, i1 false)
+ // CHECK-NEXT: store volatile float [[TMP1]], float* [[X_ASCAST]], align 4
+ // CHECK-NEXT: ret void
+ //
__global__ void test_ds_fadd(float src) {
  __shared__ float shared;
  volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
}

- // CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
- // CHECK: %shared = alloca float*, align 8, addrspace(5)
- // CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
- // CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
- // CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
- // CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
- // CHECK: store float* %[[S0]], float** %shared.ascast, align 8
- // CHECK: %shared1 = load float*, float** %shared.ascast, align 8
- // CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
- // CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
- // CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
- // CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
+ // CHECK-LABEL: @_Z12test_ds_fminfPf(
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[SHARED:%.*]] = alloca float*, align 8, addrspace(5)
+ // CHECK-NEXT: [[SHARED_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED]] to float**
+ // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+ // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float*
+ // CHECK-NEXT: [[SHARED_ADDR:%.*]] = alloca float*, align 8, addrspace(5)
+ // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED_ADDR]] to float**
+ // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5)
+ // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float*
+ // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[SHARED_COERCE:%.*]] to float*
+ // CHECK-NEXT: store float* [[TMP0]], float** [[SHARED_ASCAST]], align 8
+ // CHECK-NEXT: [[SHARED1:%.*]] = load float*, float** [[SHARED_ASCAST]], align 8
+ // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4
+ // CHECK-NEXT: store float* [[SHARED1]], float** [[SHARED_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast float* [[TMP1]] to float addrspace(3)*
+ // CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4
+ // CHECK-NEXT: [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false)
+ // CHECK-NEXT: store volatile float [[TMP4]], float* [[X_ASCAST]], align 4
+ // CHECK-NEXT: ret void
+ //
__global__ void test_ds_fmin(float src, float *shared) {
  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
}

- // CHECK: @_Z33test_ret_builtin_nondef_addrspace
- // CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
- // CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
- // CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
- // CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
- // CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
+ // CHECK-LABEL: @_Z33test_ret_builtin_nondef_addrspacev(
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[X:%.*]] = alloca i8*, align 8, addrspace(5)
+ // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast i8* addrspace(5)* [[X]] to i8**
+ // CHECK-NEXT: [[TMP0:%.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+ // CHECK-NEXT: [[TMP1:%.*]] = addrspacecast i8 addrspace(4)* [[TMP0]] to i8*
+ // CHECK-NEXT: store i8* [[TMP1]], i8** [[X_ASCAST]], align 8
+ // CHECK-NEXT: ret void
+ //
__device__ void test_ret_builtin_nondef_addrspace() {
  void *x = __builtin_amdgcn_dispatch_ptr();
}

// CHECK-LABEL: @_Z6endpgmv(
- // CHECK: call void @llvm.amdgcn.endpgm()
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: call void @llvm.amdgcn.endpgm()
+ // CHECK-NEXT: ret void
+ //
__global__ void endpgm() {
  __builtin_amdgcn_endpgm();
}

// Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.

- // CHECK-LABEL: @_Z14test_uicmp_i64
- // CHECK: store i64* %out1, i64** %out.addr.ascast
- // CHECK-NEXT: store i64 %a, i64* %a.addr.ascast
- // CHECK-NEXT: store i64 %b, i64* %b.addr.ascast
- // CHECK-NEXT: %[[V0:.*]] = load i64, i64* %a.addr.ascast
- // CHECK-NEXT: %[[V1:.*]] = load i64, i64* %b.addr.ascast
- // CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
- // CHECK-NEXT: %[[V3:.*]] = load i64*, i64** %out.addr.ascast
- // CHECK-NEXT: store i64 %[[V2]], i64* %[[V3]]
- // CHECK-NEXT: ret void
+ // CHECK-LABEL: @_Z14test_uicmp_i64Pyyy(
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[OUT:%.*]] = alloca i64*, align 8, addrspace(5)
+ // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT]] to i64**
+ // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i64*, align 8, addrspace(5)
+ // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT_ADDR]] to i64**
+ // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+ // CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast i64 addrspace(5)* [[A_ADDR]] to i64*
+ // CHECK-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+ // CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast i64 addrspace(5)* [[B_ADDR]] to i64*
+ // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i64 addrspace(1)* [[OUT_COERCE:%.*]] to i64*
+ // CHECK-NEXT: store i64* [[TMP0]], i64** [[OUT_ASCAST]], align 8
+ // CHECK-NEXT: [[OUT1:%.*]] = load i64*, i64** [[OUT_ASCAST]], align 8
+ // CHECK-NEXT: store i64* [[OUT1]], i64** [[OUT_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: store i64 [[B:%.*]], i64* [[B_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[B_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP1]], i64 [[TMP2]], i32 35)
+ // CHECK-NEXT: [[TMP4:%.*]] = load i64*, i64** [[OUT_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: store i64 [[TMP3]], i64* [[TMP4]], align 8
+ // CHECK-NEXT: ret void
+ //
__global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b)
{
  *out = __builtin_amdgcn_uicmpl(a, b, 30+5);
}

// Check the 64 bit return value is correctly returned without truncation or assertion.

- // CHECK-LABEL: @_Z14test_s_memtime
- // CHECK: %[[V1:.*]] = call i64 @llvm.amdgcn.s.memtime()
- // CHECK-NEXT: %[[PTR:.*]] = load i64*, i64** %out.addr.ascast
- // CHECK-NEXT: store i64 %[[V1]], i64* %[[PTR]]
- // CHECK-NEXT: ret void
+ // CHECK-LABEL: @_Z14test_s_memtimePy(
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[OUT:%.*]] = alloca i64*, align 8, addrspace(5)
+ // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT]] to i64**
+ // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i64*, align 8, addrspace(5)
+ // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT_ADDR]] to i64**
+ // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i64 addrspace(1)* [[OUT_COERCE:%.*]] to i64*
+ // CHECK-NEXT: store i64* [[TMP0]], i64** [[OUT_ASCAST]], align 8
+ // CHECK-NEXT: [[OUT1:%.*]] = load i64*, i64** [[OUT_ASCAST]], align 8
+ // CHECK-NEXT: store i64* [[OUT1]], i64** [[OUT_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.s.memtime()
+ // CHECK-NEXT: [[TMP2:%.*]] = load i64*, i64** [[OUT_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP2]], align 8
+ // CHECK-NEXT: ret void
+ //
__global__ void test_s_memtime(unsigned long long* out)
{
  *out = __builtin_amdgcn_s_memtime();
@@ -95,41 +173,55 @@ __global__ void test_s_memtime(unsigned long long* out)
// Check a generic pointer can be passed as a shared pointer and a generic pointer.
__device__ void func(float *x);

- // CHECK: @_Z17test_ds_fmin_funcfPf
- // CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
- // CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
- // CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
- // CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
- // CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
- // CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
- // CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
- // CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
- // CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
- // CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
- // CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
- // CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
- // CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
- // CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
- // CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
- // CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
+ // CHECK-LABEL: @_Z17test_ds_fmin_funcfPf(
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[SHARED:%.*]] = alloca float*, align 8, addrspace(5)
+ // CHECK-NEXT: [[SHARED_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED]] to float**
+ // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+ // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float*
+ // CHECK-NEXT: [[SHARED_ADDR:%.*]] = alloca float*, align 8, addrspace(5)
+ // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED_ADDR]] to float**
+ // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5)
+ // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float*
+ // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[SHARED_COERCE:%.*]] to float*
+ // CHECK-NEXT: store float* [[TMP0]], float** [[SHARED_ASCAST]], align 8
+ // CHECK-NEXT: [[SHARED1:%.*]] = load float*, float** [[SHARED_ASCAST]], align 8
+ // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4
+ // CHECK-NEXT: store float* [[SHARED1]], float** [[SHARED_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast float* [[TMP1]] to float addrspace(3)*
+ // CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4
+ // CHECK-NEXT: [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false)
+ // CHECK-NEXT: store volatile float [[TMP4]], float* [[X_ASCAST]], align 4
+ // CHECK-NEXT: [[TMP5:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: call void @_Z4funcPf(float* [[TMP5]]) #[[ATTR8:[0-9]+]]
+ // CHECK-NEXT: ret void
+ //
__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
  func(shared);
}

- // CHECK: @_Z14test_is_sharedPf(float addrspace(1)* %[[X_COERCE:.*]])
- // CHECK: %[[X:.*]] = alloca float*, align 8, addrspace(5)
- // CHECK: %[[X_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X]] to float**
- // CHECK: %[[X_ADDR:.*]] = alloca float*, align 8, addrspace(5)
- // CHECK: %[[X_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X_ADDR]] to float**
- // CHECK: %[[X_FP:.*]] = addrspacecast float addrspace(1)* %[[X_COERCE]] to float*
- // CHECK: store float* %[[X_FP]], float** %[[X_ASCAST]], align 8
- // CHECK: %[[X1:.*]] = load float*, float** %[[X_ASCAST]], align 8
- // CHECK: store float* %[[X1]], float** %[[X_ADDR_ASCAST]], align 8
- // CHECK: %[[X_TMP:.*]] = load float*, float** %[[X_ADDR_ASCAST]], align 8
- // CHECK: %[[X_ARG:.*]] = bitcast float* %[[X_TMP]] to i8*
- // CHECK: call i1 @llvm.amdgcn.is.shared(i8* %[[X_ARG]])

+ // CHECK-LABEL: @_Z14test_is_sharedPf(
+ // CHECK-NEXT: entry:
+ // CHECK-NEXT: [[X:%.*]] = alloca float*, align 8, addrspace(5)
+ // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[X]] to float**
+ // CHECK-NEXT: [[X_ADDR:%.*]] = alloca float*, align 8, addrspace(5)
+ // CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[X_ADDR]] to float**
+ // CHECK-NEXT: [[RET:%.*]] = alloca i8, align 1, addrspace(5)
+ // CHECK-NEXT: [[RET_ASCAST:%.*]] = addrspacecast i8 addrspace(5)* [[RET]] to i8*
+ // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[X_COERCE:%.*]] to float*
+ // CHECK-NEXT: store float* [[TMP0]], float** [[X_ASCAST]], align 8
+ // CHECK-NEXT: [[X1:%.*]] = load float*, float** [[X_ASCAST]], align 8
+ // CHECK-NEXT: store float* [[X1]], float** [[X_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[X_ADDR_ASCAST]], align 8
+ // CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[TMP1]] to i8*
+ // CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.is.shared(i8* [[TMP2]])
+ // CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP3]] to i8
+ // CHECK-NEXT: store i8 [[FROMBOOL]], i8* [[RET_ASCAST]], align 1
+ // CHECK-NEXT: ret void
+ //
__global__ void test_is_shared(float *x){
  bool ret = __builtin_amdgcn_is_shared(x);
}