diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 700dc87d2f821..e90a3a275f67c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -818,6 +818,42 @@ static BasicBlock::iterator skipToNonAllocaInsertPt(BasicBlock &BB,
   return I;
 }
 
+/// Get the underlying type of a homogeneous aggregate type, or nullptr if the
+/// type is non-homogeneous.
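+///
+/// For example, {i8, {i8}, <2 x i8>} is homogeneous with underlying type i8,
+/// while {i8, i8, i16} is not (see promote-alloca-structs.ll).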
+static Type *getHomogeneousType(Type *Ty) {
+  Type *ElemTy = nullptr;
+  SmallVector<Type *> WorkList;
+  WorkList.push_back(Ty);
+  while (!WorkList.empty()) {
+    Type *CurTy = WorkList.pop_back_val();
+
+    // Check if the current type is an aggregate type.
+    if (auto *VectorTy = dyn_cast<FixedVectorType>(CurTy)) {
+      WorkList.push_back(VectorTy->getElementType());
+      continue;
+    }
+    if (auto *ArrayTy = dyn_cast<ArrayType>(CurTy)) {
+      WorkList.push_back(ArrayTy->getElementType());
+      continue;
+    }
+    if (auto *StructTy = dyn_cast<StructType>(CurTy)) {
+      WorkList.append(StructTy->element_begin(), StructTy->element_end());
+      continue;
+    }
+
+    // If not, it must be the same as all other non-aggregate types.
+    if (!ElemTy)
+      ElemTy = CurTy;
+    else if (ElemTy != CurTy)
+      return nullptr;
+  }
+
+  return ElemTy;
+}
+
 // FIXME: Should try to pick the most likely to be profitable allocas first.
 bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
   LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');
@@ -828,42 +864,44 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
   }
 
   Type *AllocaTy = Alloca.getAllocatedType();
-  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
-  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
-    uint64_t NumElems = 1;
-    Type *ElemTy;
-    do {
-      NumElems *= ArrayTy->getNumElements();
-      ElemTy = ArrayTy->getElementType();
-    } while ((ArrayTy = dyn_cast<ArrayType>(ElemTy)));
-
-    // Check for array of vectors
-    auto *InnerVectorTy = dyn_cast<FixedVectorType>(ElemTy);
-    if (InnerVectorTy) {
-      NumElems *= InnerVectorTy->getNumElements();
-      ElemTy = InnerVectorTy->getElementType();
-    }
+  Type *ElemTy = getHomogeneousType(AllocaTy);
 
-    if (VectorType::isValidElementType(ElemTy) && NumElems > 0) {
-      unsigned ElementSize = DL->getTypeSizeInBits(ElemTy) / 8;
-      if (ElementSize > 0) {
-        unsigned AllocaSize = DL->getTypeStoreSize(AllocaTy);
-        // Expand vector if required to match padding of inner type,
-        // i.e. odd size subvectors.
-        // Storage size of new vector must match that of alloca for correct
-        // behaviour of byte offsets and GEP computation.
-        if (NumElems * ElementSize != AllocaSize)
-          NumElems = AllocaSize / ElementSize;
-        if (NumElems > 0 && (AllocaSize % ElementSize) == 0)
-          VectorTy = FixedVectorType::get(ElemTy, NumElems);
-      }
-    }
+  if (!ElemTy || !VectorType::isValidElementType(ElemTy)) {
+    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
+    return false;
   }
 
-  if (!VectorTy) {
-    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
+  unsigned ElementSizeInBits = DL->getTypeSizeInBits(ElemTy);
+  if (ElementSizeInBits != DL->getTypeAllocSizeInBits(ElemTy)) {
+    LLVM_DEBUG(dbgs() << "  Cannot convert to vector if the allocation size "
+                         "does not match the type's size\n");
+    return false;
+  }
+  unsigned ElementSize = ElementSizeInBits / 8;
+  if (ElementSize == 0) {
+    LLVM_DEBUG(dbgs() << "  Cannot create vector of zero-sized elements\n");
+    return false;
+  }
+
+  // Calculate the size of the corresponding vector, accounting for padding of
+  // inner types, e.g., odd-sized subvectors. Storage size of new vector must
+  // match that of alloca for correct behaviour of byte offsets and GEP
+  // computation.
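+  // For example, [2 x <3 x i8>] has a store size of 8 bytes because <3 x i8>
+  // is padded to 4 bytes, so it becomes <8 x i8> rather than <6 x i8>.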
+  unsigned AllocaSize = DL->getTypeStoreSize(AllocaTy);
+  unsigned NumElems = AllocaSize / ElementSize;
+  if (NumElems == 0) {
+    LLVM_DEBUG(dbgs() << "  Cannot vectorize an empty aggregate type\n");
     return false;
   }
+  if (NumElems * ElementSize != AllocaSize) {
+    LLVM_DEBUG(
+        dbgs() << "  Cannot convert type into vector of the same size\n");
+    return false;
+  }
+  auto *VectorTy = FixedVectorType::get(ElemTy, NumElems);
+  assert(VectorTy && "Failed to create vector type.");
 
   const unsigned MaxElements =
       (MaxVectorRegs * 32) / DL->getTypeSizeInBits(VectorTy->getElementType());
@@ -895,15 +933,6 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
 
   LLVM_DEBUG(dbgs() << "  Attempting promotion to: " << *VectorTy << "\n");
 
-  Type *VecEltTy = VectorTy->getElementType();
-  unsigned ElementSizeInBits = DL->getTypeSizeInBits(VecEltTy);
-  if (ElementSizeInBits != DL->getTypeAllocSizeInBits(VecEltTy)) {
-    LLVM_DEBUG(dbgs() << "  Cannot convert to vector if the allocation size "
-                         "does not match the type's size\n");
-    return false;
-  }
-  unsigned ElementSize = ElementSizeInBits / 8;
-  assert(ElementSize > 0);
 
   for (auto *U : Uses) {
     Instruction *Inst = cast<Instruction>(U->getUser());
@@ -943,7 +972,7 @@ bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
     if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
       // If we can't compute a vector index from this GEP, then we can't
       // promote this alloca to vector.
-      Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL, NewGEPInsts);
+      Value *Index = GEPToVectorIndex(GEP, &Alloca, ElemTy, *DL, NewGEPInsts);
       if (!Index)
         return RejectUser(Inst, "cannot compute vector index for GEP");
 
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-structs.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-structs.ll
new file mode 100644
index 0000000000000..1cdd027fef89d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-structs.ll
@@ -0,0 +1,289 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=amdgpu-promote-alloca-to-vector -amdgpu-promote-alloca-to-vector-limit=512 %s | FileCheck %s
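+
+; Test names encode the allocated type: v = vector, a = array, s = struct;
+; e.g. test_a2v4i8 allocas [2 x <4 x i8>] and test_s4i8 allocas {i8, i8, i8, i8}.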
+
+define i8 @test_v4i8(i32 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_v4i8(
+; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <4 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca <4 x i8>, align 4, addrspace(5)
+  store i32 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_a4i8(i32 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_a4i8(
+; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <4 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca [4 x i8], align 4, addrspace(5)
+  store i32 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_a2v4i8(i64 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_a2v4i8(
+; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <8 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca [2 x <4 x i8>], align 4, addrspace(5)
+  store i64 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_a2v3i8(i64 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_a2v3i8(
+; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <8 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca [2 x <3 x i8>], align 4, addrspace(5)
+  store i64 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_a2a4i8(i64 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_a2a4i8(
+; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <8 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca [2 x [4 x i8]], align 4, addrspace(5)
+  store i64 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_a2a3i8(i48 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_a2a3i8(
+; CHECK-SAME: i48 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <6 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i48 [[BITS]] to <6 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <6 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca [2 x [3 x i8]], align 4, addrspace(5)
+  store i48 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s1v4i8(i32 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s1v4i8(
+; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <4 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {<4 x i8>}, align 4, addrspace(5)
+  store i32 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s1a4i8(i32 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s1a4i8(
+; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <4 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {[4 x i8]}, align 4, addrspace(5)
+  store i32 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s4i8(i32 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s4i8(
+; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <4 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {i8, i8, i8, i8}, align 4, addrspace(5)
+  store i32 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s2v4i8(i64 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s2v4i8(
+; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <8 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {<4 x i8>, <4 x i8>}, align 4, addrspace(5)
+  store i64 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s2v2i8v4i8(i64 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s2v2i8v4i8(
+; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <8 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {<2 x i8>, <4 x i8>}, align 4, addrspace(5)
+  store i64 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s2v2i8v3i8(i64 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s2v2i8v3i8(
+; CHECK-SAME: i64 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <8 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i64 [[BITS]] to <8 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <8 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {<2 x i8>, <3 x i8>}, align 4, addrspace(5)
+  store i64 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s2s2i8s4i8(i48 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s2s2i8s4i8(
+; CHECK-SAME: i48 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <6 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i48 [[BITS]] to <6 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <6 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {{i8, i8}, {i8, i8, i8, i8}}, align 4, addrspace(5)
+  store i48 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s2s2i8s3i8(i40 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s2s2i8s3i8(
+; CHECK-SAME: i40 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <5 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i40 [[BITS]] to <5 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <5 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {{i8, i8}, {i8, i8, i8}}, align 4, addrspace(5)
+  store i40 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s3i8s1i8v2i8(i32 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s3i8s1i8v2i8(
+; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <4 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32 [[BITS]] to <4 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {i8, {i8}, <2 x i8>}, align 4, addrspace(5)
+  store i32 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+define i8 @test_s3i8i8s0(i16 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_s3i8i8s0(
+; CHECK-SAME: i16 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = freeze <2 x i8> poison
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i16 [[BITS]] to <2 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i64 [[IDX]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %stack = alloca {i8, i8, {}}, align 4, addrspace(5)
+  store i16 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+; heterogeneous element types are not supported
+define i8 @test_heterogeneous(i32 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_heterogeneous(
+; CHECK-SAME: i32 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = alloca { i8, i8, i16 }, align 4, addrspace(5)
+; CHECK-NEXT:    store i32 [[BITS]], ptr addrspace(5) [[STACK]], align 4
+; CHECK-NEXT:    [[PTR:%.*]] = getelementptr inbounds i8, ptr addrspace(5) [[STACK]], i64 [[IDX]]
+; CHECK-NEXT:    [[VAL:%.*]] = load i8, ptr addrspace(5) [[PTR]], align 1
+; CHECK-NEXT:    ret i8 [[VAL]]
+;
+  %stack = alloca {i8, i8, i16}, align 4, addrspace(5)
+  store i32 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}
+
+; empty types are not supported
+define void @test_empty() {
+; CHECK-LABEL: define void @test_empty() {
+; CHECK-NEXT:    [[STACK:%.*]] = alloca {}, align 4, addrspace(5)
+; CHECK-NEXT:    ret void
+;
+  %stack = alloca {}, align 4, addrspace(5)
+  ret void
+}
+
+; singleton types are not supported
+define i8 @test_singleton(i8 %bits, i64 %idx) {
+; CHECK-LABEL: define i8 @test_singleton(
+; CHECK-SAME: i8 [[BITS:%.*]], i64 [[IDX:%.*]]) {
+; CHECK-NEXT:    [[STACK:%.*]] = alloca { i8, {} }, align 4, addrspace(5)
+; CHECK-NEXT:    store i8 [[BITS]], ptr addrspace(5) [[STACK]], align 1
+; CHECK-NEXT:    [[PTR:%.*]] = getelementptr inbounds i8, ptr addrspace(5) [[STACK]], i64 [[IDX]]
+; CHECK-NEXT:    [[VAL:%.*]] = load i8, ptr addrspace(5) [[PTR]], align 1
+; CHECK-NEXT:    ret i8 [[VAL]]
+;
+  %stack = alloca {i8, {}}, align 4, addrspace(5)
+  store i8 %bits, ptr addrspace(5) %stack
+  %ptr = getelementptr inbounds i8, ptr addrspace(5) %stack, i64 %idx
+  %val = load i8, ptr addrspace(5) %ptr, align 1
+  ret i8 %val
+}