@@ -804,6 +804,166 @@ define <vscale x 4 x i32> @vmulhsu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl)
804804 ret <vscale x 4 x i32 > %2
805805}
806806
; Check that the VL optimizer shrinks vdivu.vv's VLMAX AVL down to the VL of
; its only user (vmul.vv), eliminating the extra vsetvli toggle.
define <vscale x 4 x i32> @vdivu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
; NOVLOPT-LABEL: vdivu_vv:
; NOVLOPT:       # %bb.0:
; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT:    vdivu.vv v8, v8, v10
; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; NOVLOPT-NEXT:    vmul.vv v8, v8, v10
; NOVLOPT-NEXT:    ret
;
; VLOPT-LABEL: vdivu_vv:
; VLOPT:       # %bb.0:
; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT:    vdivu.vv v8, v8, v10
; VLOPT-NEXT:    vmul.vv v8, v8, v10
; VLOPT-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
  ret <vscale x 4 x i32> %2
}
826+
; Same as vdivu_vv but with a scalar (GPR) divisor: the VL optimizer should
; fold vdivu.vx's VLMAX AVL into the consuming vmul.vv's VL.
define <vscale x 4 x i32> @vdivu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-LABEL: vdivu_vx:
; NOVLOPT:       # %bb.0:
; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT:    vdivu.vx v10, v8, a0
; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; NOVLOPT-NEXT:    vmul.vv v8, v10, v8
; NOVLOPT-NEXT:    ret
;
; VLOPT-LABEL: vdivu_vx:
; VLOPT:       # %bb.0:
; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT:    vdivu.vx v10, v8, a0
; VLOPT-NEXT:    vmul.vv v8, v10, v8
; VLOPT-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
  ret <vscale x 4 x i32> %2
}
846+
; Check that the VL optimizer shrinks vdiv.vv's VLMAX AVL down to the VL of
; its only user (vmul.vv), eliminating the extra vsetvli toggle.
define <vscale x 4 x i32> @vdiv_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
; NOVLOPT-LABEL: vdiv_vv:
; NOVLOPT:       # %bb.0:
; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT:    vdiv.vv v8, v8, v10
; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; NOVLOPT-NEXT:    vmul.vv v8, v8, v10
; NOVLOPT-NEXT:    ret
;
; VLOPT-LABEL: vdiv_vv:
; VLOPT:       # %bb.0:
; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT:    vdiv.vv v8, v8, v10
; VLOPT-NEXT:    vmul.vv v8, v8, v10
; VLOPT-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
  ret <vscale x 4 x i32> %2
}
866+
; Same as vdiv_vv but with a scalar (GPR) divisor: the VL optimizer should
; fold vdiv.vx's VLMAX AVL into the consuming vmul.vv's VL.
define <vscale x 4 x i32> @vdiv_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-LABEL: vdiv_vx:
; NOVLOPT:       # %bb.0:
; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT:    vdiv.vx v10, v8, a0
; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; NOVLOPT-NEXT:    vmul.vv v8, v10, v8
; NOVLOPT-NEXT:    ret
;
; VLOPT-LABEL: vdiv_vx:
; VLOPT:       # %bb.0:
; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT:    vdiv.vx v10, v8, a0
; VLOPT-NEXT:    vmul.vv v8, v10, v8
; VLOPT-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
  ret <vscale x 4 x i32> %2
}
886+
; Check that the VL optimizer shrinks vremu.vv's VLMAX AVL down to the VL of
; its only user (vmul.vv), eliminating the extra vsetvli toggle.
define <vscale x 4 x i32> @vremu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
; NOVLOPT-LABEL: vremu_vv:
; NOVLOPT:       # %bb.0:
; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT:    vremu.vv v8, v8, v10
; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; NOVLOPT-NEXT:    vmul.vv v8, v8, v10
; NOVLOPT-NEXT:    ret
;
; VLOPT-LABEL: vremu_vv:
; VLOPT:       # %bb.0:
; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT:    vremu.vv v8, v8, v10
; VLOPT-NEXT:    vmul.vv v8, v8, v10
; VLOPT-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
  ret <vscale x 4 x i32> %2
}
906+
; Same as vremu_vv but with a scalar (GPR) divisor: the VL optimizer should
; fold vremu.vx's VLMAX AVL into the consuming vmul.vv's VL.
define <vscale x 4 x i32> @vremu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-LABEL: vremu_vx:
; NOVLOPT:       # %bb.0:
; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT:    vremu.vx v10, v8, a0
; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; NOVLOPT-NEXT:    vmul.vv v8, v10, v8
; NOVLOPT-NEXT:    ret
;
; VLOPT-LABEL: vremu_vx:
; VLOPT:       # %bb.0:
; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT:    vremu.vx v10, v8, a0
; VLOPT-NEXT:    vmul.vv v8, v10, v8
; VLOPT-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
  ret <vscale x 4 x i32> %2
}
926+
; Check that the VL optimizer shrinks vrem.vv's VLMAX AVL down to the VL of
; its only user (vmul.vv), eliminating the extra vsetvli toggle.
define <vscale x 4 x i32> @vrem_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
; NOVLOPT-LABEL: vrem_vv:
; NOVLOPT:       # %bb.0:
; NOVLOPT-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; NOVLOPT-NEXT:    vrem.vv v8, v8, v10
; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; NOVLOPT-NEXT:    vmul.vv v8, v8, v10
; NOVLOPT-NEXT:    ret
;
; VLOPT-LABEL: vrem_vv:
; VLOPT:       # %bb.0:
; VLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; VLOPT-NEXT:    vrem.vv v8, v8, v10
; VLOPT-NEXT:    vmul.vv v8, v8, v10
; VLOPT-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
  ret <vscale x 4 x i32> %2
}
946+
; Same as vrem_vv but with a scalar (GPR) divisor: the VL optimizer should
; fold vrem.vx's VLMAX AVL into the consuming vmul.vv's VL.
define <vscale x 4 x i32> @vrem_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
; NOVLOPT-LABEL: vrem_vx:
; NOVLOPT:       # %bb.0:
; NOVLOPT-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; NOVLOPT-NEXT:    vrem.vx v10, v8, a0
; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; NOVLOPT-NEXT:    vmul.vv v8, v10, v8
; NOVLOPT-NEXT:    ret
;
; VLOPT-LABEL: vrem_vx:
; VLOPT:       # %bb.0:
; VLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; VLOPT-NEXT:    vrem.vx v10, v8, a0
; VLOPT-NEXT:    vmul.vv v8, v10, v8
; VLOPT-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
  %2 = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
  ret <vscale x 4 x i32> %2
}
966+
807967define <vscale x 4 x i32 > @vwmacc_vx (<vscale x 4 x i16 > %a , i16 %b , iXLen %vl ) {
808968; NOVLOPT-LABEL: vwmacc_vx:
809969; NOVLOPT: # %bb.0:
0 commit comments