; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-promote-alloca -disable-promote-alloca-to-lds=1 < %s | FileCheck %s

; Check that the extracted index is correctly sign-extended when 32-bit scratch
; address arithmetic is promoted to 64-bit vector index arithmetic.
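;
; Informal sketch of why this matters (assuming the standard AMDGPU data
; layout, where addrspace(5) scratch pointers use 32-bit offsets): the
; constant -1 element offset in %cgep is effectively a 32-bit value, so when
; promote-alloca rewrites the load as an extractelement with a 64-bit index,
; that offset must be sign-extended. This yields the checked
; "add i64 -1, %offset" rather than a zero-extended index of
; %offset + 4294967295.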
define amdgpu_kernel void @negative_index_byte(ptr %out, i64 %offset) {
; CHECK-LABEL: @negative_index_byte(
; CHECK-NEXT: [[STACK:%.*]] = freeze <4 x i8> poison
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i8> [[STACK]], i8 0, i32 0
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i8> [[TMP1]], i8 1, i32 1
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i8> [[TMP2]], i8 2, i32 2
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i8> [[TMP3]], i8 3, i32 3
; CHECK-NEXT: [[TMP5:%.*]] = add i64 -1, [[OFFSET:%.*]]
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i8> [[TMP4]], i64 [[TMP5]]
; CHECK-NEXT: store i8 [[TMP6]], ptr [[OUT:%.*]], align 1
; CHECK-NEXT: ret void
;
  %stack = alloca [4 x i8], align 4, addrspace(5)
  %gep.0 = getelementptr inbounds [4 x i8], ptr addrspace(5) %stack, i64 0, i64 0
  %gep.1 = getelementptr inbounds [4 x i8], ptr addrspace(5) %stack, i64 0, i64 1
  %gep.2 = getelementptr inbounds [4 x i8], ptr addrspace(5) %stack, i64 0, i64 2
  %gep.3 = getelementptr inbounds [4 x i8], ptr addrspace(5) %stack, i64 0, i64 3
  store i8 0, ptr addrspace(5) %gep.0
  store i8 1, ptr addrspace(5) %gep.1
  store i8 2, ptr addrspace(5) %gep.2
  store i8 3, ptr addrspace(5) %gep.3
  %vgep = getelementptr inbounds [4 x i8], ptr addrspace(5) %stack, i64 0, i64 %offset
  %cgep = getelementptr inbounds [4 x i8], ptr addrspace(5) %vgep, i64 0, i64 -1
  %load = load i8, ptr addrspace(5) %cgep
  store i8 %load, ptr %out
  ret void
}

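; Same check as @negative_index_byte, but with i32 elements, so the byte
; offsets are presumably scaled back to element indices before the
; extractelement is formed.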
define amdgpu_kernel void @negative_index_word(ptr %out, i64 %offset) {
; CHECK-LABEL: @negative_index_word(
; CHECK-NEXT: [[STACK:%.*]] = freeze <4 x i32> poison
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[STACK]], i32 0, i32 0
; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 1, i32 1
; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 2, i32 2
; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> [[TMP3]], i32 3, i32 3
; CHECK-NEXT: [[TMP5:%.*]] = add i64 -1, [[OFFSET:%.*]]
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i32> [[TMP4]], i64 [[TMP5]]
; CHECK-NEXT: store i32 [[TMP6]], ptr [[OUT:%.*]], align 4
; CHECK-NEXT: ret void
;
  %stack = alloca [4 x i32], align 4, addrspace(5)
  %gep.0 = getelementptr inbounds [4 x i32], ptr addrspace(5) %stack, i64 0, i64 0
  %gep.1 = getelementptr inbounds [4 x i32], ptr addrspace(5) %stack, i64 0, i64 1
  %gep.2 = getelementptr inbounds [4 x i32], ptr addrspace(5) %stack, i64 0, i64 2
  %gep.3 = getelementptr inbounds [4 x i32], ptr addrspace(5) %stack, i64 0, i64 3
  store i32 0, ptr addrspace(5) %gep.0
  store i32 1, ptr addrspace(5) %gep.1
  store i32 2, ptr addrspace(5) %gep.2
  store i32 3, ptr addrspace(5) %gep.3
  %vgep = getelementptr inbounds [4 x i32], ptr addrspace(5) %stack, i64 0, i64 %offset
  %cgep = getelementptr inbounds [4 x i32], ptr addrspace(5) %vgep, i64 0, i64 -1
  %load = load i32, ptr addrspace(5) %cgep
  store i32 %load, ptr %out
  ret void
}