@@ -67,8 +67,8 @@ define i32 @va1(ptr %fmt, ...) {
 ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
 ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va)
 ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
-; RV32-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
+; RV32-NEXT: %20:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
+; RV32-NEXT: G_STORE %20(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
 ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
 ; RV32-NEXT: $x10 = COPY [[LOAD1]](s32)
 ; RV32-NEXT: PseudoRET implicit $x10
@@ -105,8 +105,8 @@ define i32 @va1(ptr %fmt, ...) {
 ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
 ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
 ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64)
-; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
+; RV64-NEXT: %20:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64)
+; RV64-NEXT: G_STORE %20(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
 ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
 ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
 ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64)
@@ -687,8 +687,8 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
 ; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
 ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
-; RV32-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
+; RV32-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
+; RV32-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
 ; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
 ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
 ; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64)
@@ -733,8 +733,8 @@ define i64 @va2(ptr %fmt, ...) nounwind {
 ; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
 ; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
 ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
-; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
+; RV64-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
+; RV64-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
 ; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
 ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
 ; RV64-NEXT: $x10 = COPY [[LOAD1]](s64)
@@ -974,8 +974,8 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
 ; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
 ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
-; RV32-NEXT: G_STORE [[PTR_ADD5]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
+; RV32-NEXT: %24:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
+; RV32-NEXT: G_STORE %24(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
 ; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
 ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
 ; RV32-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[LOAD1]]
@@ -1020,8 +1020,8 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
 ; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
 ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
-; RV64-NEXT: G_STORE [[PTR_ADD6]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
+; RV64-NEXT: %25:_(p0) = nuw nusw inbounds G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
+; RV64-NEXT: G_STORE %25(p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
 ; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
 ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
 ; RV64-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[LOAD1]]
@@ -1724,8 +1724,8 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; RV32-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s32) into %ir.va)
 ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va)
 ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
-; RV32-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va)
+; RV32-NEXT: %21:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s32)
+; RV32-NEXT: G_STORE %21(p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va)
 ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
 ; RV32-NEXT: $x10 = COPY [[LOAD1]](s32)
 ; RV32-NEXT: PseudoRET implicit $x10
@@ -1763,8 +1763,8 @@ define i32 @va_large_stack(ptr %fmt, ...) {
 ; RV64-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s64) into %ir.va)
 ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
 ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64)
-; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va, align 4)
+; RV64-NEXT: %21:_(p0) = nuw nusw inbounds G_PTR_ADD [[LOAD]], [[C1]](s64)
+; RV64-NEXT: G_STORE %21(p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va, align 4)
 ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
 ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
 ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64)