;=========================== begin_copyright_notice ============================
;
; Copyright (C) 2024 Intel Corporation
;
; SPDX-License-Identifier: MIT
;
;============================ end_copyright_notice =============================

; REQUIRES: regkeys
; RUN: igc_opt -debugify --igc-gep-loop-strength-reduction -check-debugify -S < %s 2>&1 | FileCheck %s

; Reduced index is expressed with SCEVMulExpr.

; Debug-info related check
; CHECK: CheckModuleDebugify: PASS

; A <1024 x i64> member forces large-GRF handling; the vector round-trips
; through an alloca so it is live across the loop.
%"class.IntVector" = type { <1024 x i64> }

; Loop with two GEPs: %arrayidx's index is (i + 5) * %multiplier (an i64
; SCEVMulExpr, eligible for GEP loop-strength-reduction) while %arrayidx1's
; index involves no i64 multiply and must be left untouched.
define spir_kernel void @test(i32 addrspace(1)* %p, i32 addrspace(1)* %t, i32 %k, i32 %n, i64 %multiplier, <1024 x i64> addrspace(1)* %otp) {
entry:
  %_alloca = alloca %"class.IntVector", align 8
  %vecPtr = getelementptr %"class.IntVector", %"class.IntVector"* %_alloca, i32 0, i32 0
  store <1024 x i64> zeroinitializer, <1024 x i64>* %vecPtr, align 8
  %loadedVec = load <1024 x i64>, <1024 x i64>* %vecPtr, align 8
  %cmp1 = icmp slt i32 0, %n
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

; CHECK-LABEL: for.body.lr.ph:
; Check that GepLSR was applied to the gep index where i64 multiplication was used.
; Preheader materializes the initial offset (39 + 5) * %multiplier = %multiplier * 44
; and the per-iteration step 2 * %multiplier (shl by 1).
; CHECK: [[MULL:%.*]] = mul i64 %multiplier, 44
; CHECK: [[GEP_PHI1:%.*]] = getelementptr i32, i32 addrspace(1)* %p, i64 [[MULL]]
; CHECK: [[STEP:%.*]] = shl i64 %multiplier, 1

; Check that GepLSR was NOT applied to the gep index where NO i64 multiplication was used.
; CHECK-NOT: add i32 %k, -69
; CHECK-NOT: getelementptr i32, i32 addrspace(1)* %t
for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

; CHECK-LABEL: for.body:

; Check that GepLSR was applied to the gep index where i64 multiplication was used.
; CHECK: [[GEP:%.*]] = phi i32 addrspace(1)* [ [[GEP_PHI1]], %for.body.lr.ph ], [ [[GEP_PHI2:%.*]], %for.body ]
; CHECK: %i.02 = phi i32 [ 39, %for.body.lr.ph ], [ %inc, %for.body ]
; CHECK: store i32 11, i32 addrspace(1)* [[GEP]], align 4

; Check that GepLSR was NOT applied to the gep index where NO i64 multiplication was used.
; CHECK-NOT: getelementptr i32, i32 addrspace(1)* [[VAR:.*]], i64 -2
; CHECK: %add1 = add nsw i32 %i.02, 30
; CHECK: %sub1 = sub nsw i32 %k, %add1
; CHECK: %idxprom1 = zext i32 %sub1 to i64
; CHECK: %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %t, i64 %idxprom1

; CHECK: %inc = add nuw nsw i32 %i.02, 2
; CHECK: %cmp = icmp slt i32 %inc, %n
; CHECK: [[GEP_PHI2]] = getelementptr i32, i32 addrspace(1)* [[GEP]], i64 [[STEP]]
; CHECK: br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %i.02 = phi i32 [ 39, %for.body.lr.ph ], [ %inc, %for.body ]
  %add = add nsw i32 %i.02, 5
  %zext = zext i32 %add to i64
  %idxprom = mul i64 %zext, %multiplier
  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 %idxprom
  store i32 11, i32 addrspace(1)* %arrayidx, align 4
  %add1 = add nsw i32 %i.02, 30
  %sub1 = sub nsw i32 %k, %add1
  %idxprom1 = zext i32 %sub1 to i64
  %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %t, i64 %idxprom1
  store i32 77, i32 addrspace(1)* %arrayidx1, align 4
  %inc = add nuw nsw i32 %i.02, 2
  %cmp = icmp slt i32 %inc, %n
  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge

for.cond.for.end_crit_edge:                       ; preds = %for.body
  br label %for.end

for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
  ; Keep the big vector live to the kernel's end so the alloca is not elided.
  store <1024 x i64> %loadedVec, <1024 x i64> addrspace(1)* %otp, align 8
  ret void
}

; IGC function metadata: marks @test as a kernel (function_type 0).
!igc.functions = !{!0}

!0 = !{void (i32 addrspace(1)*, i32 addrspace(1)*, i32, i32, i64, <1024 x i64> addrspace(1)*)* @test, !1}
!1 = !{!2}
!2 = !{!"function_type", i32 0}