@@ -2,7 +2,7 @@ test optimize
 set opt_level=speed_and_size
 target x86_64
 
-function %foo(i64 vmctx, i64, i32, i32) -> i32 fast {
+function %pure_hoists(i64 vmctx, i64, i32, i32) -> i32 fast {
     gv0 = vmctx
     gv1 = load.i64 notrap aligned readonly gv0+8
     gv2 = load.i64 notrap aligned gv1
@@ -15,7 +15,7 @@ function %foo(i64 vmctx, i64, i32, i32) -> i32 fast {
     jump block2(v5, v2, v3) ; v5 = 0
 
 block2(v6: i32, v7: i32, v15: i32):
-    v9 = load.i64 notrap aligned readonly v0+80
+    v9 = load.i64 notrap aligned readonly pure v0+80
     v8 = uextend.i64 v7
     v10 = iadd v9, v8
     v11 = load.i32 little heap v10
@@ -37,8 +37,46 @@ function %foo(i64 vmctx, i64, i32, i32) -> i32 fast {
     return v12
 }
 
-; check: v9 = load.i64 notrap aligned readonly v0+80
+; check: v9 = load.i64 notrap aligned readonly pure v0+80
 ; check: block2(v6: i32, v7: i32, v15: i32):
 ; check: v10 = iadd.i64 v9, v8
 ; check: v11 = load.i32 little heap v10
 ; check: brif v19, block2(v12, v21, v19), block4
+
+function %non_pure_does_not_hoist(i64 vmctx, i64, i32, i32) -> i32 fast {
+    gv0 = vmctx
+    gv1 = load.i64 notrap aligned readonly gv0+8
+    gv2 = load.i64 notrap aligned gv1
+    gv3 = vmctx
+    gv4 = load.i64 notrap aligned readonly gv3+80
+    stack_limit = gv2
+
+block0(v0: i64, v1: i64, v2: i32, v3: i32):
+    v5 = iconst.i32 0
+    jump block2(v5, v2, v3) ; v5 = 0
+
+block2(v6: i32, v7: i32, v15: i32):
+    v9 = load.i64 notrap aligned readonly v0+80
+    v8 = uextend.i64 v7
+    v10 = iadd v9, v8
+    v11 = load.i32 little heap v10
+    v16 = iconst.i32 1
+    v17 = isub v15, v16 ; v16 = 1
+    v12 = iadd v6, v11
+    v4 -> v12
+    v13 = iconst.i32 4
+    v14 = iadd v7, v13 ; v13 = 4
+    brif v17, block2(v12, v14, v17), block4
+
+block4:
+    jump block3
+
+block3:
+    jump block1
+
+block1:
+    return v12
+}
+
+; check: block2(v6: i32, v7: i32, v15: i32):
+; nextln: v9 = load.i64 notrap aligned readonly v0+80