@@ -14,8 +14,7 @@
 body : |
   bb.0:
     ; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_const
-    ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
-    ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[S_MOV_B32_]], 128, implicit-def $scc
+    ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, 128, implicit-def $scc
     ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
     ; CHECK-NEXT: SI_RETURN implicit $sgpr4
     %0:sreg_32 = S_MOV_B32 %stack.0
@@ -35,8 +34,7 @@
 body : |
   bb.0:
     ; CHECK-LABEL: name: fold_frame_index__s_add_i32__const_fi
-    ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
-    ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 128, [[S_MOV_B32_]], implicit-def $scc
+    ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 128, %stack.0, implicit-def $scc
     ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
     ; CHECK-NEXT: SI_RETURN implicit $sgpr4
     %0:sreg_32 = S_MOV_B32 %stack.0
@@ -56,8 +54,7 @@
 body : |
   bb.0:
     ; CHECK-LABEL: name: fold_frame_index__s_add_i32__materializedconst_fi
-    ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
-    ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, [[S_MOV_B32_]], implicit-def $scc
+    ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, %stack.0, implicit-def $scc
     ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
     ; CHECK-NEXT: SI_RETURN implicit $sgpr4
     %0:sreg_32 = S_MOV_B32 256
@@ -101,8 +98,7 @@ stack:
 body : |
   bb.0:
     ; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_materializedconst_1
-    ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 %stack.0
-    ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, [[S_MOV_B32_]], implicit-def $scc
+    ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 256, %stack.0, implicit-def $scc
     ; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
     ; CHECK-NEXT: SI_RETURN implicit $sgpr4
     %0:sreg_32 = S_MOV_B32 256
@@ -173,8 +169,7 @@ stack:
 body : |
   bb.0:
     ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e32__const_v_fi
-    ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
-    ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, [[V_MOV_B32_e32_]], implicit $exec
+    ; CHECK: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 128, %stack.0, implicit $exec
     ; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e32_]]
     ; CHECK-NEXT: SI_RETURN implicit $sgpr4
     %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
@@ -215,21 +210,10 @@ stack:
   - { id: 0, size: 16384, alignment: 4, local-offset: 0 }
 body : |
   bb.0:
-    ; GFX9-LABEL: name: fold_frame_index__v_add_u32_e64__imm_v_fi
-    ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
-    ; GFX9-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 64, [[V_MOV_B32_e32_]], 0, implicit $exec
-    ; GFX9-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]]
-    ; GFX9-NEXT: SI_RETURN implicit $sgpr4
-    ;
-    ; GFX10-LABEL: name: fold_frame_index__v_add_u32_e64__imm_v_fi
-    ; GFX10: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 64, %stack.0, 0, implicit $exec
-    ; GFX10-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]]
-    ; GFX10-NEXT: SI_RETURN implicit $sgpr4
-    ;
-    ; GFX12-LABEL: name: fold_frame_index__v_add_u32_e64__imm_v_fi
-    ; GFX12: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 64, %stack.0, 0, implicit $exec
-    ; GFX12-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]]
-    ; GFX12-NEXT: SI_RETURN implicit $sgpr4
+    ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e64__imm_v_fi
+    ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 64, %stack.0, 0, implicit $exec
+    ; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]]
+    ; CHECK-NEXT: SI_RETURN implicit $sgpr4
     %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
     %1:vgpr_32 = V_ADD_U32_e64 64, %0, 0, implicit $exec
     $sgpr4 = COPY %1
@@ -246,21 +230,10 @@ stack:
   - { id: 0, size: 16384, alignment: 4, local-offset: 0 }
 body : |
   bb.0:
-    ; GFX9-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_imm
-    ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
-    ; GFX9-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[V_MOV_B32_e32_]], 64, 0, implicit $exec
-    ; GFX9-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]]
-    ; GFX9-NEXT: SI_RETURN implicit $sgpr4
-    ;
-    ; GFX10-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_imm
-    ; GFX10: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 %stack.0, 64, 0, implicit $exec
-    ; GFX10-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]]
-    ; GFX10-NEXT: SI_RETURN implicit $sgpr4
-    ;
-    ; GFX12-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_imm
-    ; GFX12: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 %stack.0, 64, 0, implicit $exec
-    ; GFX12-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]]
-    ; GFX12-NEXT: SI_RETURN implicit $sgpr4
+    ; CHECK-LABEL: name: fold_frame_index__v_add_u32_e64___v_fi_imm
+    ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 %stack.0, 64, 0, implicit $exec
+    ; CHECK-NEXT: $sgpr4 = COPY [[V_ADD_U32_e64_]]
+    ; CHECK-NEXT: SI_RETURN implicit $sgpr4
     %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
     %1:vgpr_32 = V_ADD_U32_e64 %0, 64, 0, implicit $exec
     $sgpr4 = COPY %1
@@ -278,8 +251,7 @@ stack:
 body : |
   bb.0:
     ; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e32__const_v_fi
-    ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
-    ; CHECK-NEXT: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 128, [[V_MOV_B32_e32_]], implicit-def $vcc, implicit $exec
+    ; CHECK: [[V_ADD_CO_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_CO_U32_e32 128, %stack.0, implicit-def $vcc, implicit $exec
     ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e32_]]
     ; CHECK-NEXT: SI_RETURN implicit $vgpr0
     %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
@@ -298,21 +270,10 @@ stack:
   - { id: 0, size: 16384, alignment: 4, local-offset: 0 }
 body : |
   bb.0:
-    ; GFX9-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_imm
-    ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
-    ; GFX9-NEXT: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 [[V_MOV_B32_e32_]], 64, 0, implicit $exec
-    ; GFX9-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]]
-    ; GFX9-NEXT: SI_RETURN implicit $vgpr0
-    ;
-    ; GFX10-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_imm
-    ; GFX10: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 %stack.0, 64, 0, implicit $exec
-    ; GFX10-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]]
-    ; GFX10-NEXT: SI_RETURN implicit $vgpr0
-    ;
-    ; GFX12-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_imm
-    ; GFX12: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 %stack.0, 64, 0, implicit $exec
-    ; GFX12-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]]
-    ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+    ; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e64__v_fi_imm
+    ; CHECK: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 %stack.0, 64, 0, implicit $exec
+    ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]]
+    ; CHECK-NEXT: SI_RETURN implicit $vgpr0
     %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
     %1:vgpr_32, %2:sreg_64 = V_ADD_CO_U32_e64 %0, 64, 0, implicit $exec
     $vgpr0 = COPY %1
@@ -329,21 +290,10 @@ stack:
   - { id: 0, size: 16384, alignment: 4, local-offset: 0 }
 body : |
   bb.0:
-    ; GFX9-LABEL: name: fold_frame_index__v_add_co_u32_e64__imm_v_fi
-    ; GFX9: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
-    ; GFX9-NEXT: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 64, [[V_MOV_B32_e32_]], 0, implicit $exec
-    ; GFX9-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]]
-    ; GFX9-NEXT: SI_RETURN implicit $vgpr0
-    ;
-    ; GFX10-LABEL: name: fold_frame_index__v_add_co_u32_e64__imm_v_fi
-    ; GFX10: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 64, %stack.0, 0, implicit $exec
-    ; GFX10-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]]
-    ; GFX10-NEXT: SI_RETURN implicit $vgpr0
-    ;
-    ; GFX12-LABEL: name: fold_frame_index__v_add_co_u32_e64__imm_v_fi
-    ; GFX12: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 64, %stack.0, 0, implicit $exec
-    ; GFX12-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]]
-    ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+    ; CHECK-LABEL: name: fold_frame_index__v_add_co_u32_e64__imm_v_fi
+    ; CHECK: [[V_ADD_CO_U32_e64_:%[0-9]+]]:vgpr_32, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64 = V_ADD_CO_U32_e64 64, %stack.0, 0, implicit $exec
+    ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_CO_U32_e64_]]
+    ; CHECK-NEXT: SI_RETURN implicit $vgpr0
     %0:vgpr_32 = V_MOV_B32_e32 %stack.0, implicit $exec
     %1:vgpr_32, %2:sreg_64 = V_ADD_CO_U32_e64 64, %0, 0, implicit $exec
     $vgpr0 = COPY %1