@@ -962,6 +962,166 @@ define <vscale x 4 x i32> @vmulhsu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl)
   ret <vscale x 4 x i32> %2
 }
 
+define <vscale x 4 x i32> @vdivu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vdivu_vv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vdivu.vv v8, v8, v10
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vmul.vv v8, v8, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vdivu_vv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT:    vdivu.vv v8, v8, v10
+; VLOPT-NEXT:    vmul.vv v8, v8, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vdivu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
+; NOVLOPT-LABEL: vdivu_vx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vdivu.vx v10, v8, a0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vmul.vv v8, v10, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vdivu_vx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT:    vdivu.vx v10, v8, a0
+; VLOPT-NEXT:    vmul.vv v8, v10, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vdiv_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vdiv_vv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vdiv.vv v8, v8, v10
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vmul.vv v8, v8, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vdiv_vv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT:    vdiv.vv v8, v8, v10
+; VLOPT-NEXT:    vmul.vv v8, v8, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vdiv_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
+; NOVLOPT-LABEL: vdiv_vx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vdiv.vx v10, v8, a0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vmul.vv v8, v10, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vdiv_vx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT:    vdiv.vx v10, v8, a0
+; VLOPT-NEXT:    vmul.vv v8, v10, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vremu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vremu_vv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vremu.vv v8, v8, v10
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vmul.vv v8, v8, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vremu_vv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT:    vremu.vv v8, v8, v10
+; VLOPT-NEXT:    vmul.vv v8, v8, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vremu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
+; NOVLOPT-LABEL: vremu_vx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vremu.vx v10, v8, a0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vmul.vv v8, v10, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vremu_vx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT:    vremu.vx v10, v8, a0
+; VLOPT-NEXT:    vmul.vv v8, v10, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vrem_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vrem_vv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vrem.vv v8, v8, v10
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vmul.vv v8, v8, v10
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vrem_vv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; VLOPT-NEXT:    vrem.vv v8, v8, v10
+; VLOPT-NEXT:    vmul.vv v8, v8, v10
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vrem_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
+; NOVLOPT-LABEL: vrem_vx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vrem.vx v10, v8, a0
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vmul.vv v8, v10, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vrem_vx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; VLOPT-NEXT:    vrem.vx v10, v8, a0
+; VLOPT-NEXT:    vmul.vv v8, v10, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
 define <vscale x 4 x i32> @vwmacc_vx(<vscale x 4 x i16> %a, i16 %b, iXLen %vl) {
 ; NOVLOPT-LABEL: vwmacc_vx:
 ; NOVLOPT:       # %bb.0: