; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s

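;; These tests check that the @llvm.abs.* intrinsics on 256-bit LASX vectors
;; lower to an xvneg + xvmax pair for each element width, for both the
;; is_int_min_poison = true and = false variants.
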
define void @vabs_b(ptr %dst, ptr %src) {
; CHECK-LABEL: vabs_b:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvneg.b $xr1, $xr0
; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %a = load <32 x i8>, ptr %src
  %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 true)
  store <32 x i8> %b, ptr %dst
  ret void
}

define void @vabs_b_1(ptr %dst, ptr %src) {
; CHECK-LABEL: vabs_b_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvneg.b $xr1, $xr0
; CHECK-NEXT:    xvmax.b $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %a = load <32 x i8>, ptr %src
  %b = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
  store <32 x i8> %b, ptr %dst
  ret void
}

define void @vabs_h(ptr %dst, ptr %src) {
; CHECK-LABEL: vabs_h:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvneg.h $xr1, $xr0
; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %a = load <16 x i16>, ptr %src
  %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 true)
  store <16 x i16> %b, ptr %dst
  ret void
}

define void @vabs_h_1(ptr %dst, ptr %src) {
; CHECK-LABEL: vabs_h_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvneg.h $xr1, $xr0
; CHECK-NEXT:    xvmax.h $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %a = load <16 x i16>, ptr %src
  %b = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
  store <16 x i16> %b, ptr %dst
  ret void
}

define void @vabs_w(ptr %dst, ptr %src) {
; CHECK-LABEL: vabs_w:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvneg.w $xr1, $xr0
; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %a = load <8 x i32>, ptr %src
  %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 true)
  store <8 x i32> %b, ptr %dst
  ret void
}

define void @vabs_w_1(ptr %dst, ptr %src) {
; CHECK-LABEL: vabs_w_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvneg.w $xr1, $xr0
; CHECK-NEXT:    xvmax.w $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %a = load <8 x i32>, ptr %src
  %b = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
  store <8 x i32> %b, ptr %dst
  ret void
}

define void @vabs_d(ptr %dst, ptr %src) {
; CHECK-LABEL: vabs_d:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvneg.d $xr1, $xr0
; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %a = load <4 x i64>, ptr %src
  %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 true)
  store <4 x i64> %b, ptr %dst
  ret void
}

define void @vabs_d_1(ptr %dst, ptr %src) {
; CHECK-LABEL: vabs_d_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xvld $xr0, $a1, 0
; CHECK-NEXT:    xvneg.d $xr1, $xr0
; CHECK-NEXT:    xvmax.d $xr0, $xr0, $xr1
; CHECK-NEXT:    xvst $xr0, $a0, 0
; CHECK-NEXT:    ret
entry:
  %a = load <4 x i64>, ptr %src
  %b = tail call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
  store <4 x i64> %b, ptr %dst
  ret void
}

declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)