
Commit 393581d

[CFE][Codegen] Update auto-generated check lines for few GPU lit tests
which is essentially required as a pre-commit for https://reviews.llvm.org/D110257.

Reviewed By: yaxunl

Differential Revision: https://reviews.llvm.org/D110676
1 parent 62d9163 commit 393581d
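Check lines like the ones added below are emitted by the utils/update_cc_test_checks.py script named in the test's NOTE line. A minimal sketch of how such a test is typically regenerated; the build directory and test path are illustrative assumptions, not taken from this commit:

    # Hypothetical paths: point --llvm-bin at a build containing clang, then pass the lit test to update.
    python llvm/utils/update_cc_test_checks.py --llvm-bin build/bin \
        clang/test/CodeGenCUDA/builtins-amdgcn.cu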

5 files changed: +609 -319 lines

Lines changed: 160 additions & 68 deletions
@@ -1,3 +1,4 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
// RUN: -o - | FileCheck %s
@@ -9,84 +10,161 @@
#include "Inputs/cuda.h"

// CHECK-LABEL: @_Z16use_dispatch_ptrPi(
-// CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT:%.*]] = alloca i32*, align 8, addrspace(5)
+// CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[OUT]] to i32**
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i32*, align 8, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[OUT_ADDR]] to i32**
+// CHECK-NEXT: [[DISPATCH_PTR:%.*]] = alloca i32*, align 8, addrspace(5)
+// CHECK-NEXT: [[DISPATCH_PTR_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[DISPATCH_PTR]] to i32**
+// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i32 addrspace(1)* [[OUT_COERCE:%.*]] to i32*
+// CHECK-NEXT: store i32* [[TMP0]], i32** [[OUT_ASCAST]], align 8
+// CHECK-NEXT: [[OUT1:%.*]] = load i32*, i32** [[OUT_ASCAST]], align 8
+// CHECK-NEXT: store i32* [[OUT1]], i32** [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(4)* [[TMP1]] to i32*
+// CHECK-NEXT: store i32* [[TMP2]], i32** [[DISPATCH_PTR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DISPATCH_PTR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[TMP4]], i32* [[TMP5]], align 4
+// CHECK-NEXT: ret void
+//
__global__ void use_dispatch_ptr(int* out) {
  const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr();
  *out = *dispatch_ptr;
}

-// CHECK-LABEL: @_Z12test_ds_fmaxf(
-// CHECK: call contract float @llvm.amdgcn.ds.fmax.f32(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
__global__
+// CHECK-LABEL: @_Z12test_ds_fmaxf(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float*
+// CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float*
+// CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fmax.f32(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float [[TMP0]], i32 0, i32 0, i1 false)
+// CHECK-NEXT: store volatile float [[TMP1]], float* [[X_ASCAST]], align 4
+// CHECK-NEXT: ret void
+//
void test_ds_fmax(float src) {
  __shared__ float shared;
  volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
}

// CHECK-LABEL: @_Z12test_ds_faddf(
-// CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float*
+// CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float*
+// CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float [[TMP0]], i32 0, i32 0, i1 false)
+// CHECK-NEXT: store volatile float [[TMP1]], float* [[X_ASCAST]], align 4
+// CHECK-NEXT: ret void
+//
__global__ void test_ds_fadd(float src) {
  __shared__ float shared;
  volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
}

-// CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
-// CHECK: %shared = alloca float*, align 8, addrspace(5)
-// CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
-// CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
-// CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
-// CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
-// CHECK: store float* %[[S0]], float** %shared.ascast, align 8
-// CHECK: %shared1 = load float*, float** %shared.ascast, align 8
-// CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
-// CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
-// CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
-// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
+// CHECK-LABEL: @_Z12test_ds_fminfPf(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[SHARED:%.*]] = alloca float*, align 8, addrspace(5)
+// CHECK-NEXT: [[SHARED_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED]] to float**
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float*
+// CHECK-NEXT: [[SHARED_ADDR:%.*]] = alloca float*, align 8, addrspace(5)
+// CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED_ADDR]] to float**
+// CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float*
+// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[SHARED_COERCE:%.*]] to float*
+// CHECK-NEXT: store float* [[TMP0]], float** [[SHARED_ASCAST]], align 8
+// CHECK-NEXT: [[SHARED1:%.*]] = load float*, float** [[SHARED_ASCAST]], align 8
+// CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store float* [[SHARED1]], float** [[SHARED_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast float* [[TMP1]] to float addrspace(3)*
+// CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false)
+// CHECK-NEXT: store volatile float [[TMP4]], float* [[X_ASCAST]], align 4
+// CHECK-NEXT: ret void
+//
__global__ void test_ds_fmin(float src, float *shared) {
  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
}

-// CHECK: @_Z33test_ret_builtin_nondef_addrspace
-// CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
-// CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
-// CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-// CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
-// CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
+// CHECK-LABEL: @_Z33test_ret_builtin_nondef_addrspacev(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X:%.*]] = alloca i8*, align 8, addrspace(5)
+// CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast i8* addrspace(5)* [[X]] to i8**
+// CHECK-NEXT: [[TMP0:%.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+// CHECK-NEXT: [[TMP1:%.*]] = addrspacecast i8 addrspace(4)* [[TMP0]] to i8*
+// CHECK-NEXT: store i8* [[TMP1]], i8** [[X_ASCAST]], align 8
+// CHECK-NEXT: ret void
+//
__device__ void test_ret_builtin_nondef_addrspace() {
  void *x = __builtin_amdgcn_dispatch_ptr();
}

// CHECK-LABEL: @_Z6endpgmv(
-// CHECK: call void @llvm.amdgcn.endpgm()
+// CHECK-NEXT: entry:
+// CHECK-NEXT: call void @llvm.amdgcn.endpgm()
+// CHECK-NEXT: ret void
+//
__global__ void endpgm() {
  __builtin_amdgcn_endpgm();
}

// Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.

-// CHECK-LABEL: @_Z14test_uicmp_i64
-// CHECK: store i64* %out1, i64** %out.addr.ascast
-// CHECK-NEXT: store i64 %a, i64* %a.addr.ascast
-// CHECK-NEXT: store i64 %b, i64* %b.addr.ascast
-// CHECK-NEXT: %[[V0:.*]] = load i64, i64* %a.addr.ascast
-// CHECK-NEXT: %[[V1:.*]] = load i64, i64* %b.addr.ascast
-// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
-// CHECK-NEXT: %[[V3:.*]] = load i64*, i64** %out.addr.ascast
-// CHECK-NEXT: store i64 %[[V2]], i64* %[[V3]]
-// CHECK-NEXT: ret void
+// CHECK-LABEL: @_Z14test_uicmp_i64Pyyy(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT:%.*]] = alloca i64*, align 8, addrspace(5)
+// CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT]] to i64**
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i64*, align 8, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT_ADDR]] to i64**
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast i64 addrspace(5)* [[A_ADDR]] to i64*
+// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
+// CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast i64 addrspace(5)* [[B_ADDR]] to i64*
+// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i64 addrspace(1)* [[OUT_COERCE:%.*]] to i64*
+// CHECK-NEXT: store i64* [[TMP0]], i64** [[OUT_ASCAST]], align 8
+// CHECK-NEXT: [[OUT1:%.*]] = load i64*, i64** [[OUT_ASCAST]], align 8
+// CHECK-NEXT: store i64* [[OUT1]], i64** [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i64 [[B:%.*]], i64* [[B_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[B_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP1]], i64 [[TMP2]], i32 35)
+// CHECK-NEXT: [[TMP4:%.*]] = load i64*, i64** [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i64 [[TMP3]], i64* [[TMP4]], align 8
+// CHECK-NEXT: ret void
+//
__global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b)
{
  *out = __builtin_amdgcn_uicmpl(a, b, 30+5);
}

// Check the 64 bit return value is correctly returned without truncation or assertion.

-// CHECK-LABEL: @_Z14test_s_memtime
-// CHECK: %[[V1:.*]] = call i64 @llvm.amdgcn.s.memtime()
-// CHECK-NEXT: %[[PTR:.*]] = load i64*, i64** %out.addr.ascast
-// CHECK-NEXT: store i64 %[[V1]], i64* %[[PTR]]
-// CHECK-NEXT: ret void
+// CHECK-LABEL: @_Z14test_s_memtimePy(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT:%.*]] = alloca i64*, align 8, addrspace(5)
+// CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT]] to i64**
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i64*, align 8, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT_ADDR]] to i64**
+// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i64 addrspace(1)* [[OUT_COERCE:%.*]] to i64*
+// CHECK-NEXT: store i64* [[TMP0]], i64** [[OUT_ASCAST]], align 8
+// CHECK-NEXT: [[OUT1:%.*]] = load i64*, i64** [[OUT_ASCAST]], align 8
+// CHECK-NEXT: store i64* [[OUT1]], i64** [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.s.memtime()
+// CHECK-NEXT: [[TMP2:%.*]] = load i64*, i64** [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP2]], align 8
+// CHECK-NEXT: ret void
+//
__global__ void test_s_memtime(unsigned long long* out)
{
  *out = __builtin_amdgcn_s_memtime();
@@ -95,41 +173,55 @@ __global__ void test_s_memtime(unsigned long long* out)
// Check a generic pointer can be passed as a shared pointer and a generic pointer.
__device__ void func(float *x);

-// CHECK: @_Z17test_ds_fmin_funcfPf
-// CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
-// CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
-// CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
-// CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
-// CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
-// CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
-// CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
-// CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
-// CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
-// CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
-// CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
-// CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
-// CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
-// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
-// CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
-// CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
+// CHECK-LABEL: @_Z17test_ds_fmin_funcfPf(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[SHARED:%.*]] = alloca float*, align 8, addrspace(5)
+// CHECK-NEXT: [[SHARED_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED]] to float**
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float*
+// CHECK-NEXT: [[SHARED_ADDR:%.*]] = alloca float*, align 8, addrspace(5)
+// CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED_ADDR]] to float**
+// CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5)
+// CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float*
+// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[SHARED_COERCE:%.*]] to float*
+// CHECK-NEXT: store float* [[TMP0]], float** [[SHARED_ASCAST]], align 8
+// CHECK-NEXT: [[SHARED1:%.*]] = load float*, float** [[SHARED_ASCAST]], align 8
+// CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: store float* [[SHARED1]], float** [[SHARED_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = addrspacecast float* [[TMP1]] to float addrspace(3)*
+// CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false)
+// CHECK-NEXT: store volatile float [[TMP4]], float* [[X_ASCAST]], align 4
+// CHECK-NEXT: [[TMP5:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8
+// CHECK-NEXT: call void @_Z4funcPf(float* [[TMP5]]) #[[ATTR8:[0-9]+]]
+// CHECK-NEXT: ret void
+//
__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
  func(shared);
}

-// CHECK: @_Z14test_is_sharedPf(float addrspace(1)* %[[X_COERCE:.*]])
-// CHECK: %[[X:.*]] = alloca float*, align 8, addrspace(5)
-// CHECK: %[[X_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X]] to float**
-// CHECK: %[[X_ADDR:.*]] = alloca float*, align 8, addrspace(5)
-// CHECK: %[[X_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[X_ADDR]] to float**
-// CHECK: %[[X_FP:.*]] = addrspacecast float addrspace(1)* %[[X_COERCE]] to float*
-// CHECK: store float* %[[X_FP]], float** %[[X_ASCAST]], align 8
-// CHECK: %[[X1:.*]] = load float*, float** %[[X_ASCAST]], align 8
-// CHECK: store float* %[[X1]], float** %[[X_ADDR_ASCAST]], align 8
-// CHECK: %[[X_TMP:.*]] = load float*, float** %[[X_ADDR_ASCAST]], align 8
-// CHECK: %[[X_ARG:.*]] = bitcast float* %[[X_TMP]] to i8*
-// CHECK: call i1 @llvm.amdgcn.is.shared(i8* %[[X_ARG]])

+// CHECK-LABEL: @_Z14test_is_sharedPf(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X:%.*]] = alloca float*, align 8, addrspace(5)
+// CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[X]] to float**
+// CHECK-NEXT: [[X_ADDR:%.*]] = alloca float*, align 8, addrspace(5)
+// CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[X_ADDR]] to float**
+// CHECK-NEXT: [[RET:%.*]] = alloca i8, align 1, addrspace(5)
+// CHECK-NEXT: [[RET_ASCAST:%.*]] = addrspacecast i8 addrspace(5)* [[RET]] to i8*
+// CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[X_COERCE:%.*]] to float*
+// CHECK-NEXT: store float* [[TMP0]], float** [[X_ASCAST]], align 8
+// CHECK-NEXT: [[X1:%.*]] = load float*, float** [[X_ASCAST]], align 8
+// CHECK-NEXT: store float* [[X1]], float** [[X_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[X_ADDR_ASCAST]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[TMP1]] to i8*
+// CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.is.shared(i8* [[TMP2]])
+// CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP3]] to i8
+// CHECK-NEXT: store i8 [[FROMBOOL]], i8* [[RET_ASCAST]], align 1
+// CHECK-NEXT: ret void
+//
__global__ void test_is_shared(float *x){
  bool ret = __builtin_amdgcn_is_shared(x);
}
