@@ -26,6 +26,85 @@ define i32 @func_01() {
   ret i32 %rm
 }
 
+define i1 @test_get_rounding_sideeffect() #0 {
+; RV32IF-LABEL: test_get_rounding_sideeffect:
+; RV32IF:       # %bb.0: # %entry
+; RV32IF-NEXT:    addi sp, sp, -16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 16
+; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IF-NEXT:    .cfi_offset ra, -4
+; RV32IF-NEXT:    .cfi_offset s0, -8
+; RV32IF-NEXT:    li a0, 1
+; RV32IF-NEXT:    call fesetround
+; RV32IF-NEXT:    frrm a0
+; RV32IF-NEXT:    lui a1, 66
+; RV32IF-NEXT:    slli a0, a0, 2
+; RV32IF-NEXT:    addi a1, a1, 769
+; RV32IF-NEXT:    srl s0, a1, a0
+; RV32IF-NEXT:    li a0, 0
+; RV32IF-NEXT:    andi s0, s0, 7
+; RV32IF-NEXT:    bnez s0, .LBB1_2
+; RV32IF-NEXT:  # %bb.1: # %if.end
+; RV32IF-NEXT:    call fesetround
+; RV32IF-NEXT:    addi s0, s0, -1
+; RV32IF-NEXT:    seqz a0, s0
+; RV32IF-NEXT:  .LBB1_2: # %return
+; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IF-NEXT:    .cfi_restore ra
+; RV32IF-NEXT:    .cfi_restore s0
+; RV32IF-NEXT:    addi sp, sp, 16
+; RV32IF-NEXT:    .cfi_def_cfa_offset 0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: test_get_rounding_sideeffect:
+; RV64IF:       # %bb.0: # %entry
+; RV64IF-NEXT:    addi sp, sp, -16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 16
+; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
+; RV64IF-NEXT:    .cfi_offset ra, -8
+; RV64IF-NEXT:    .cfi_offset s0, -16
+; RV64IF-NEXT:    li a0, 1
+; RV64IF-NEXT:    call fesetround
+; RV64IF-NEXT:    frrm a0
+; RV64IF-NEXT:    lui a1, 66
+; RV64IF-NEXT:    slli a0, a0, 2
+; RV64IF-NEXT:    addiw a1, a1, 769
+; RV64IF-NEXT:    srl s0, a1, a0
+; RV64IF-NEXT:    li a0, 0
+; RV64IF-NEXT:    andi s0, s0, 7
+; RV64IF-NEXT:    bnez s0, .LBB1_2
+; RV64IF-NEXT:  # %bb.1: # %if.end
+; RV64IF-NEXT:    call fesetround
+; RV64IF-NEXT:    addi s0, s0, -1
+; RV64IF-NEXT:    seqz a0, s0
+; RV64IF-NEXT:  .LBB1_2: # %return
+; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
+; RV64IF-NEXT:    .cfi_restore ra
+; RV64IF-NEXT:    .cfi_restore s0
+; RV64IF-NEXT:    addi sp, sp, 16
+; RV64IF-NEXT:    .cfi_def_cfa_offset 0
+; RV64IF-NEXT:    ret
+entry:
+  %call = tail call i32 @fesetround(i32 noundef 1) #0
+  %0 = tail call i32 @llvm.get.rounding() #0
+  %cmp.not = icmp eq i32 %0, 0
+  br i1 %cmp.not, label %if.end, label %return
+
+if.end:
+  %call1 = tail call i32 @fesetround(i32 noundef 0) #0
+  %1 = tail call i32 @llvm.get.rounding() #0
+  %cmp2.not = icmp eq i32 %1, 1
+  br label %return
+
+return:
+  %retval.0 = phi i1 [ false, %entry ], [ %cmp2.not, %if.end ]
+  ret i1 %retval.0
+}
+
 define void @func_02(i32 %rm) {
 ; RV32IF-LABEL: func_02:
 ; RV32IF:       # %bb.0:
@@ -121,5 +200,9 @@ define void @func_07() {
   ret void
 }
 
+attributes #0 = { strictfp }
+
 declare void @llvm.set.rounding(i32)
 declare i32 @llvm.get.rounding()
+declare i32 @fesetround(i32 noundef)
+