@@ -1,8 +1,6 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
-// RUN: %clang_cc1 -O0 -cl-std=CL2.0 -triple amdgcn-amd-amdhsa -target-cpu gfx942 \
-// RUN: %s -emit-llvm -o - | FileCheck %s
-
-// REQUIRES: amdgpu-registered-target
+// RUN: %clang_cc1 -O0 -cl-std=CL1.2 -triple amdgcn-amd-amdhsa -target-cpu gfx942 \
+// RUN: %s -emit-llvm -o - -disable-llvm-passes | FileCheck %s
 
 // `Ptr.getElementType()` in `CheckAtomicAlignment` returns
 // %struct.__half2 = type { %union.anon }
@@ -22,22 +20,18 @@ struct __half2 {
 };
 
 // CHECK-LABEL: define dso_local <2 x half> @test_flat_add_2f16(
-// CHECK-SAME: ptr noundef [[ADDR:%.*]], <2 x half> noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-SAME: ptr addrspace(5) noundef [[ADDR:%.*]], <2 x half> noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-NEXT: [[ENTRY:.*:]]
-// CHECK-NEXT: [[RETVAL:%.*]] = alloca <2 x half>, align 4, addrspace(5)
-// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
+// CHECK-NEXT: [[ADDR_ADDR:%.*]] = alloca ptr addrspace(5), align 4, addrspace(5)
 // CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca <2 x half>, align 4, addrspace(5)
-// CHECK-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
-// CHECK-NEXT: [[ADDR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[ADDR_ADDR]] to ptr
-// CHECK-NEXT: [[VAL_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VAL_ADDR]] to ptr
-// CHECK-NEXT: store ptr [[ADDR]], ptr [[ADDR_ADDR_ASCAST]], align 8
-// CHECK-NEXT: store <2 x half> [[VAL]], ptr [[VAL_ADDR_ASCAST]], align 4
-// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ADDR_ADDR_ASCAST]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = load <2 x half>, ptr [[VAL_ADDR_ASCAST]], align 4
-// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr [[TMP0]], <2 x half> [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4:![0-9]+]]
+// CHECK-NEXT: store ptr addrspace(5) [[ADDR]], ptr addrspace(5) [[ADDR_ADDR]], align 4
+// CHECK-NEXT: store <2 x half> [[VAL]], ptr addrspace(5) [[VAL_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr addrspace(5), ptr addrspace(5) [[ADDR_ADDR]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load <2 x half>, ptr addrspace(5) [[VAL_ADDR]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw fadd ptr addrspace(5) [[TMP0]], <2 x half> [[TMP1]] syncscope("agent") monotonic, align 4, !amdgpu.no.fine.grained.memory [[META4:![0-9]+]]
 // CHECK-NEXT: ret <2 x half> [[TMP2]]
 //
-half2 test_flat_add_2f16(__generic short2 *addr, half2 val) {
+half2 test_flat_add_2f16(short2 *addr, half2 val) {
   return __builtin_amdgcn_flat_atomic_fadd_v2f16((struct __half2 *)addr, val);
 }
 //.
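
Note (illustrative sketch, not part of the diff): the test comment about `Ptr.getElementType()` in `CheckAtomicAlignment` refers to the wrapper type the call casts to. A minimal, assumed layout that lowers to `%struct.__half2 = type { %union.anon }` could look like the OpenCL C below; the names and exact layout are assumptions, since the actual `struct __half2` definition in the test file is not shown in this hunk.

    // Assumed sketch: half vectors need the fp16 extension to be declared.
    #pragma OPENCL EXTENSION cl_khr_fp16 : enable

    struct __half2 {
      union {        // anonymous union -> lowers to %union.anon in the IR
        half2 h;     // the <2 x half> payload
      };
    };               // -> %struct.__half2 = type { %union.anon }

    // The test then calls the builtin through a cast, which is why
    // CheckAtomicAlignment sees the struct type rather than <2 x half>:
    //   __builtin_amdgcn_flat_atomic_fadd_v2f16((struct __half2 *)addr, val);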