; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -O3 --amdgpu-lower-module-lds-strategy=module < %s | FileCheck -check-prefix=GCN %s
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s

; Check that the !tbaa, !alias.scope and !noalias metadata on the LDS loads
; and store survives the lower-module-lds rewrite of @a/@b/@c into a single
; kernel LDS struct, both in the opt IR output and through to gfx900 ISA.

@a = internal unnamed_addr addrspace(3) global [64 x i32] poison, align 4
@b = internal unnamed_addr addrspace(3) global [64 x i32] poison, align 4
@c = internal unnamed_addr addrspace(3) global [64 x i32] poison, align 4

define amdgpu_kernel void @ds_load_stores_aainfo(ptr addrspace(1) %arg, i32 %i) {
; GCN-LABEL: ds_load_stores_aainfo:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dword s0, s[4:5], 0x2c
; GCN-NEXT:    v_mov_b32_e32 v0, 1
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_lshl_b32 s0, s0, 2
; GCN-NEXT:    v_mov_b32_e32 v4, s0
; GCN-NEXT:    ds_read2_b32 v[2:3], v4 offset1:1
; GCN-NEXT:    ds_write_b64 v1, v[0:1] offset:512
; GCN-NEXT:    ds_read2_b32 v[4:5], v4 offset0:64 offset1:65
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
; GCN-NEXT:    ; sched_group_barrier mask(0x00000200) size(1) SyncID(0)
; GCN-NEXT:    ; sched_group_barrier mask(0x00000100) size(1) SyncID(0)
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v4
; GCN-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
; GCN-NEXT:    global_store_dwordx2 v1, v[2:3], s[0:1]
; GCN-NEXT:    s_endpgm
; CHECK-LABEL: define amdgpu_kernel void @ds_load_stores_aainfo(
; CHECK-SAME: ptr addrspace(1) [[ARG:%.*]], i32 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[BB:.*:]]
; CHECK-NEXT:    [[GEP_A:%.*]] = getelementptr inbounds [64 x i32], ptr addrspace(3) @llvm.amdgcn.kernel.ds_load_stores_aainfo.lds, i32 0, i32 [[I]]
; CHECK-NEXT:    [[GEP_B:%.*]] = getelementptr inbounds [64 x i32], ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_DS_LOAD_STORES_AAINFO_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.kernel.ds_load_stores_aainfo.lds, i32 0, i32 1), i32 0, i32 [[I]]
; CHECK-NEXT:    [[VAL_A:%.*]] = load i64, ptr addrspace(3) [[GEP_A]], align 4, !tbaa [[TBAA1:![0-9]+]], !alias.scope [[META4:![0-9]+]], !noalias [[META7:![0-9]+]]
; CHECK-NEXT:    [[VAL_B:%.*]] = load i64, ptr addrspace(3) [[GEP_B]], align 4, !tbaa [[TBAA1]], !alias.scope [[META12:![0-9]+]], !noalias [[META13:![0-9]+]]
; CHECK-NEXT:    store i64 1, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_DS_LOAD_STORES_AAINFO_LDS_T]], ptr addrspace(3) @llvm.amdgcn.kernel.ds_load_stores_aainfo.lds, i32 0, i32 2), align 16, !tbaa [[TBAA1]], !alias.scope [[META14:![0-9]+]], !noalias [[META15:![0-9]+]]
; CHECK-NEXT:    [[VAL:%.*]] = add i64 [[VAL_A]], [[VAL_B]]
; CHECK-NEXT:    store i64 [[VAL]], ptr addrspace(1) [[ARG]], align 4
; CHECK-NEXT:    tail call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
; CHECK-NEXT:    tail call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
; CHECK-NEXT:    tail call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
; CHECK-NEXT:    ret void
;
bb:
  %gep.a = getelementptr inbounds [64 x i32], ptr addrspace(3) @a, i32 0, i32 %i
  %gep.b = getelementptr inbounds [64 x i32], ptr addrspace(3) @b, i32 0, i32 %i

  %val.a = load i64, ptr addrspace(3) %gep.a, align 4, !tbaa !0, !noalias !5
  %val.b = load i64, ptr addrspace(3) %gep.b, align 4, !tbaa !0, !noalias !5

  store i64 1, ptr addrspace(3) @c, align 4, !tbaa !0, !noalias !2

  %val = add i64 %val.a, %val.b
  store i64 %val, ptr addrspace(1) %arg, align 4

  tail call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
  tail call void @llvm.amdgcn.sched.group.barrier(i32 512, i32 1, i32 0)
  tail call void @llvm.amdgcn.sched.group.barrier(i32 256, i32 1, i32 0)
  ret void
}

 !0 = !{!"omnipotent char", !1, i64 0}
 !1 = !{!1}
 !2 = !{!3}
 !3 = distinct !{!3, !4}
 !4 = distinct !{!4}
 !5 = !{!3}
;.
; CHECK: [[TBAA1]] = !{[[META2:![0-9]+]], [[META2]], i64 0, i64 0}
; CHECK: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]}
; CHECK: [[META3]] = distinct !{[[META3]]}
; CHECK: [[META4]] = !{[[META5:![0-9]+]]}
; CHECK: [[META5]] = distinct !{[[META5]], [[META6:![0-9]+]]}
; CHECK: [[META6]] = distinct !{[[META6]]}
; CHECK: [[META7]] = !{[[META8:![0-9]+]], [[META10:![0-9]+]], [[META11:![0-9]+]]}
; CHECK: [[META8]] = distinct !{[[META8]], [[META9:![0-9]+]]}
; CHECK: [[META9]] = distinct !{[[META9]]}
; CHECK: [[META10]] = distinct !{[[META10]], [[META6]]}
; CHECK: [[META11]] = distinct !{[[META11]], [[META6]]}
; CHECK: [[META12]] = !{[[META10]]}
; CHECK: [[META13]] = !{[[META8]], [[META5]], [[META11]]}
; CHECK: [[META14]] = !{[[META11]]}
; CHECK: [[META15]] = !{[[META8]], [[META5]], [[META10]]}
;.