; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=loongarch32 -mattr=+d < %s | FileCheck -check-prefix=LA32 %s
; RUN: llc -mtriple=loongarch64 -mattr=+d < %s | FileCheck -check-prefix=LA64 %s

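; Neither target has native fp16 arithmetic, so the half value is extended
; with the __extendhfsf2 soft-float helper, computed via the exp10f libcall,
; and truncated back with __truncsfhf2. The lu12i.w/or pair sets the upper
; bits of the result register before it is moved back into an FPR.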
define half @exp10_f16(half %x) #0 {
; LA32-LABEL: exp10_f16:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    bl __extendhfsf2
; LA32-NEXT:    bl exp10f
; LA32-NEXT:    bl __truncsfhf2
; LA32-NEXT:    movfr2gr.s $a0, $fa0
; LA32-NEXT:    lu12i.w $a1, -16
; LA32-NEXT:    or $a0, $a0, $a1
; LA32-NEXT:    movgr2fr.w $fa0, $a0
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: exp10_f16:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    movfr2gr.s $a0, $fa0
; LA64-NEXT:    lu12i.w $a1, -16
; LA64-NEXT:    or $a0, $a0, $a1
; LA64-NEXT:    movgr2fr.w $fa0, $a0
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %r = call half @llvm.exp10.f16(half %x)
  ret half %r
}

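; The <2 x half> case is fully scalarized: each lane runs through the same
; extend/exp10f/truncate sequence. On LA64 the two results are staged through
; the stack and reassembled via an LSX register.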
define <2 x half> @exp10_v2f16(<2 x half> %x) #0 {
; LA32-LABEL: exp10_v2f16:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
; LA32-NEXT:    movgr2fr.w $fs0, $a1
; LA32-NEXT:    movgr2fr.w $fa0, $a0
; LA32-NEXT:    bl __extendhfsf2
; LA32-NEXT:    bl exp10f
; LA32-NEXT:    bl __truncsfhf2
; LA32-NEXT:    movfr2gr.s $fp, $fa0
; LA32-NEXT:    fmov.s $fa0, $fs0
; LA32-NEXT:    bl __extendhfsf2
; LA32-NEXT:    bl exp10f
; LA32-NEXT:    bl __truncsfhf2
; LA32-NEXT:    movfr2gr.s $a1, $fa0
; LA32-NEXT:    move $a0, $fp
; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: exp10_v2f16:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -32
; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT:    move $fp, $a0
; LA64-NEXT:    movgr2fr.w $fa0, $a1
; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    movfr2gr.s $a0, $fa0
; LA64-NEXT:    st.h $a0, $sp, 2
; LA64-NEXT:    movgr2fr.w $fa0, $fp
; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    movfr2gr.s $a0, $fa0
; LA64-NEXT:    st.h $a0, $sp, 0
; LA64-NEXT:    vld $vr0, $sp, 0
; LA64-NEXT:    vpickve2gr.h $a0, $vr0, 0
; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 1
; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 32
; LA64-NEXT:    ret
  %r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
  ret <2 x half> %r
}

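; A lone float lowers to a tail call to exp10f: a direct branch on LA32, and
; a pcaddu18i/jr pair through $t8 (the %call36 relocation) on LA64.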
define float @exp10_f32(float %x) #0 {
; LA32-LABEL: exp10_f32:
; LA32:       # %bb.0:
; LA32-NEXT:    b exp10f
;
; LA64-LABEL: exp10_f32:
; LA64:       # %bb.0:
; LA64-NEXT:    pcaddu18i $t8, %call36(exp10f)
; LA64-NEXT:    jr $t8
  %r = call float @llvm.exp10.f32(float %x)
  ret float %r
}

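; <2 x float> is split into two exp10f calls. On LA64 the lanes are extracted
; with vreplvei.w and the two results are recombined with vpackev.w.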
define <2 x float> @exp10_v2f32(<2 x float> %x) #0 {
; LA32-LABEL: exp10_v2f32:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -32
; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT:    fmov.s $fs0, $fa1
; LA32-NEXT:    bl exp10f
; LA32-NEXT:    fmov.s $fs1, $fa0
; LA32-NEXT:    fmov.s $fa0, $fs0
; LA32-NEXT:    bl exp10f
; LA32-NEXT:    fmov.s $fa1, $fa0
; LA32-NEXT:    fmov.s $fa0, $fs1
; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 32
; LA32-NEXT:    ret
;
; LA64-LABEL: exp10_v2f32:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -48
; LA64-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT:    vst $vr0, $sp, 0 # 16-byte Folded Spill
; LA64-NEXT:    vreplvei.w $vr0, $vr0, 0
; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
; LA64-NEXT:    vreplvei.w $vr0, $vr0, 1
; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
; LA64-NEXT:    vld $vr1, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT:    vpackev.w $vr0, $vr0, $vr1
; LA64-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 48
; LA64-NEXT:    ret
  %r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
  ret <2 x float> %r
}

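; The double case mirrors the float one, tail-calling the exp10 libcall.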
define double @exp10_f64(double %x) #0 {
; LA32-LABEL: exp10_f64:
; LA32:       # %bb.0:
; LA32-NEXT:    b exp10
;
; LA64-LABEL: exp10_f64:
; LA64:       # %bb.0:
; LA64-NEXT:    pcaddu18i $t8, %call36(exp10)
; LA64-NEXT:    jr $t8
  %r = call double @llvm.exp10.f64(double %x)
  ret double %r
}

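; <2 x double> is likewise scalarized; on LA64 the results are inserted back
; into the vector through movfr2gr.d/vinsgr2vr.d, while LA32 keeps both
; doubles in callee-saved FPRs across the calls.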
define <2 x double> @exp10_v2f64(<2 x double> %x) #0 {
; LA32-LABEL: exp10_v2f64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -32
; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
; LA32-NEXT:    fmov.d $fs0, $fa1
; LA32-NEXT:    bl exp10
; LA32-NEXT:    fmov.d $fs1, $fa0
; LA32-NEXT:    fmov.d $fa0, $fs0
; LA32-NEXT:    bl exp10
; LA32-NEXT:    fmov.d $fa1, $fa0
; LA32-NEXT:    fmov.d $fa0, $fs1
; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 32
; LA32-NEXT:    ret
;
; LA64-LABEL: exp10_v2f64:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -48
; LA64-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT:    vst $vr0, $sp, 0 # 16-byte Folded Spill
; LA64-NEXT:    vreplvei.d $vr0, $vr0, 0
; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    movfr2gr.d $a0, $fa0
; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
; LA64-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
; LA64-NEXT:    vreplvei.d $vr0, $vr0, 1
; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    movfr2gr.d $a0, $fa0
; LA64-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 1
; LA64-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 48
; LA64-NEXT:    ret
  %r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
  ret <2 x double> %r
}

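; fp128 lowers to the exp10l libcall. On LA32 the argument and result are
; passed indirectly through stack memory, while on LA64 they travel in GPR
; pairs, so the wrapper only needs to save $ra.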
define fp128 @exp10_f128(fp128 %x) #0 {
; LA32-LABEL: exp10_f128:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -48
; LA32-NEXT:    st.w $ra, $sp, 44 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 40 # 4-byte Folded Spill
; LA32-NEXT:    ld.w $a2, $a1, 0
; LA32-NEXT:    ld.w $a3, $a1, 4
; LA32-NEXT:    ld.w $a4, $a1, 8
; LA32-NEXT:    ld.w $a1, $a1, 12
; LA32-NEXT:    move $fp, $a0
; LA32-NEXT:    st.w $a1, $sp, 20
; LA32-NEXT:    st.w $a4, $sp, 16
; LA32-NEXT:    st.w $a3, $sp, 12
; LA32-NEXT:    addi.w $a0, $sp, 24
; LA32-NEXT:    addi.w $a1, $sp, 8
; LA32-NEXT:    st.w $a2, $sp, 8
; LA32-NEXT:    bl exp10l
; LA32-NEXT:    ld.w $a0, $sp, 36
; LA32-NEXT:    ld.w $a1, $sp, 32
; LA32-NEXT:    ld.w $a2, $sp, 28
; LA32-NEXT:    ld.w $a3, $sp, 24
; LA32-NEXT:    st.w $a0, $fp, 12
; LA32-NEXT:    st.w $a1, $fp, 8
; LA32-NEXT:    st.w $a2, $fp, 4
; LA32-NEXT:    st.w $a3, $fp, 0
; LA32-NEXT:    ld.w $fp, $sp, 40 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 44 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 48
; LA32-NEXT:    ret
;
; LA64-LABEL: exp10_f128:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10l)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %r = call fp128 @llvm.exp10.f128(fp128 %x)
  ret fp128 %r
}

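; The two fp128 lanes get separate exp10l calls: LA32 marshals each value
; through stack slots, and LA64 carries the halves in callee-saved GPRs
; between the calls before storing them to the returned-value buffer.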
define <2 x fp128> @exp10_v2f128(<2 x fp128> %x) #0 {
; LA32-LABEL: exp10_v2f128:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -96
; LA32-NEXT:    st.w $ra, $sp, 92 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 88 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s0, $sp, 84 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s1, $sp, 80 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s2, $sp, 76 # 4-byte Folded Spill
; LA32-NEXT:    st.w $s3, $sp, 72 # 4-byte Folded Spill
; LA32-NEXT:    ld.w $s0, $a1, 16
; LA32-NEXT:    ld.w $s1, $a1, 20
; LA32-NEXT:    ld.w $s2, $a1, 24
; LA32-NEXT:    ld.w $s3, $a1, 28
; LA32-NEXT:    ld.w $a2, $a1, 0
; LA32-NEXT:    ld.w $a3, $a1, 4
; LA32-NEXT:    ld.w $a4, $a1, 8
; LA32-NEXT:    ld.w $a1, $a1, 12
; LA32-NEXT:    move $fp, $a0
; LA32-NEXT:    st.w $a1, $sp, 20
; LA32-NEXT:    st.w $a4, $sp, 16
; LA32-NEXT:    st.w $a3, $sp, 12
; LA32-NEXT:    addi.w $a0, $sp, 24
; LA32-NEXT:    addi.w $a1, $sp, 8
; LA32-NEXT:    st.w $a2, $sp, 8
; LA32-NEXT:    bl exp10l
; LA32-NEXT:    st.w $s3, $sp, 52
; LA32-NEXT:    st.w $s2, $sp, 48
; LA32-NEXT:    st.w $s1, $sp, 44
; LA32-NEXT:    addi.w $a0, $sp, 56
; LA32-NEXT:    addi.w $a1, $sp, 40
; LA32-NEXT:    st.w $s0, $sp, 40
; LA32-NEXT:    bl exp10l
; LA32-NEXT:    ld.w $a0, $sp, 24
; LA32-NEXT:    ld.w $a1, $sp, 28
; LA32-NEXT:    ld.w $a2, $sp, 32
; LA32-NEXT:    ld.w $a3, $sp, 36
; LA32-NEXT:    ld.w $a4, $sp, 68
; LA32-NEXT:    ld.w $a5, $sp, 64
; LA32-NEXT:    ld.w $a6, $sp, 60
; LA32-NEXT:    ld.w $a7, $sp, 56
; LA32-NEXT:    st.w $a4, $fp, 28
; LA32-NEXT:    st.w $a5, $fp, 24
; LA32-NEXT:    st.w $a6, $fp, 20
; LA32-NEXT:    st.w $a7, $fp, 16
; LA32-NEXT:    st.w $a3, $fp, 12
; LA32-NEXT:    st.w $a2, $fp, 8
; LA32-NEXT:    st.w $a1, $fp, 4
; LA32-NEXT:    st.w $a0, $fp, 0
; LA32-NEXT:    ld.w $s3, $sp, 72 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s2, $sp, 76 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s1, $sp, 80 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $s0, $sp, 84 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $fp, $sp, 88 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 92 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 96
; LA32-NEXT:    ret
;
; LA64-LABEL: exp10_v2f128:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -48
; LA64-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64-NEXT:    st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT:    st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT:    st.d $s2, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    st.d $s3, $sp, 0 # 8-byte Folded Spill
; LA64-NEXT:    ld.d $fp, $a1, 16
; LA64-NEXT:    ld.d $s0, $a1, 24
; LA64-NEXT:    ld.d $a2, $a1, 0
; LA64-NEXT:    ld.d $a1, $a1, 8
; LA64-NEXT:    move $s1, $a0
; LA64-NEXT:    move $a0, $a2
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10l)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    move $s2, $a0
; LA64-NEXT:    move $s3, $a1
; LA64-NEXT:    move $a0, $fp
; LA64-NEXT:    move $a1, $s0
; LA64-NEXT:    pcaddu18i $ra, %call36(exp10l)
; LA64-NEXT:    jirl $ra, $ra, 0
; LA64-NEXT:    st.d $a1, $s1, 24
; LA64-NEXT:    st.d $a0, $s1, 16
; LA64-NEXT:    st.d $s3, $s1, 8
; LA64-NEXT:    st.d $s2, $s1, 0
; LA64-NEXT:    ld.d $s3, $sp, 0 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $s2, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 48
; LA64-NEXT:    ret
  %r = call <2 x fp128> @llvm.exp10.v2f128(<2 x fp128> %x)
  ret <2 x fp128> %r
}

attributes #0 = { nounwind }