@@ -271,6 +271,14 @@ typedef enum S390Opcode {
     VRIc_VREP       = 0xe74d,
 
     VRRa_VLR        = 0xe756,
+    VRRc_VA         = 0xe7f3,
+    VRRc_VCEQ       = 0xe7f8,   /* we leave the m5 cs field 0 */
+    VRRc_VCH        = 0xe7fb,   /* " */
+    VRRc_VCHL       = 0xe7f9,   /* " */
+    VRRc_VN         = 0xe768,
+    VRRc_VO         = 0xe76a,
+    VRRc_VS         = 0xe7f7,
+    VRRc_VX         = 0xe76d,
     VRRf_VLVGP      = 0xe762,
 
     VRSb_VLVG       = 0xe722,
@@ -607,6 +615,17 @@ static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
     tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
 }
 
+static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
+                              TCGReg v1, TCGReg v2, TCGReg v3, int m4)
+{
+    tcg_debug_assert(is_vector_reg(v1));
+    tcg_debug_assert(is_vector_reg(v2));
+    tcg_debug_assert(is_vector_reg(v3));
+    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
+    tcg_out16(s, v3 << 12);
+    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
+}
+
 static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
                               TCGReg v1, TCGReg r2, TCGReg r3)
 {
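
Aside (not part of the patch): the three tcg_out16() calls above assemble the 6-byte VRR-c format -- the opcode high byte with the V1/V2 nibbles, then V3, then the M4 field, the RXB register-extension bits and the opcode low byte. Below is a minimal standalone sketch of that arithmetic, assuming RXB() places one extension bit per register number >= 16 at bits 11..8 of the final halfword, as the backend's macro does.

#include <stdio.h>

#define RXB(B1, B2, B3, B4)                            \
    ((((B1) & 0x10) << 7) | (((B2) & 0x10) << 6) |     \
     (((B3) & 0x10) << 5) | (((B4) & 0x10) << 4))

int main(void)
{
    unsigned op = 0xe7f3;                   /* VRRc_VA, vector add */
    unsigned v1 = 1, v2 = 2, v3 = 3, m4 = 0;
    unsigned hw0 = (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf);
    unsigned hw1 = (v3 & 0xf) << 12;
    unsigned hw2 = (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12);

    /* Prints "e712 3000 00f3": the halfwords of VA %v1,%v2,%v3,0. */
    printf("%04x %04x %04x\n", hw0, hw1, hw2);
    return 0;
}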
@@ -2636,18 +2655,145 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                            unsigned vecl, unsigned vece,
                            const TCGArg *args, const int *const_args)
 {
-    g_assert_not_reached();
+    TCGType type = vecl + TCG_TYPE_V64;
+    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
+
+    switch (opc) {
+    case INDEX_op_ld_vec:
+        tcg_out_ld(s, type, a0, a1, a2);
+        break;
+    case INDEX_op_st_vec:
+        tcg_out_st(s, type, a0, a1, a2);
+        break;
+    case INDEX_op_dupm_vec:
+        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
+        break;
+
+    case INDEX_op_add_vec:
+        tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
+        break;
+    case INDEX_op_sub_vec:
+        tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
+        break;
+    case INDEX_op_and_vec:
+        tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
+        break;
+    case INDEX_op_or_vec:
+        tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
+        break;
+    case INDEX_op_xor_vec:
+        tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
+        break;
+
+    case INDEX_op_cmp_vec:
+        switch ((TCGCond)args[3]) {
+        case TCG_COND_EQ:
+            tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
+            break;
+        case TCG_COND_GT:
+            tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
+            break;
+        case TCG_COND_GTU:
+            tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
+
+    case INDEX_op_mov_vec:   /* Always emitted via tcg_out_mov.  */
+    case INDEX_op_dup_vec:   /* Always emitted via tcg_out_dup_vec.  */
+    default:
+        g_assert_not_reached();
+    }
 }
 
 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
 {
-    return 0;
+    switch (opc) {
+    case INDEX_op_add_vec:
+    case INDEX_op_and_vec:
+    case INDEX_op_or_vec:
+    case INDEX_op_sub_vec:
+    case INDEX_op_xor_vec:
+        return 1;
+    case INDEX_op_cmp_vec:
+        return -1;
+    default:
+        return 0;
+    }
+}
+
+static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
+                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
+{
+    bool need_swap = false, need_inv = false;
+
+    switch (cond) {
+    case TCG_COND_EQ:
+    case TCG_COND_GT:
+    case TCG_COND_GTU:
+        break;
+    case TCG_COND_NE:
+    case TCG_COND_LE:
+    case TCG_COND_LEU:
+        need_inv = true;
+        break;
+    case TCG_COND_LT:
+    case TCG_COND_LTU:
+        need_swap = true;
+        break;
+    case TCG_COND_GE:
+    case TCG_COND_GEU:
+        need_swap = need_inv = true;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    if (need_inv) {
+        cond = tcg_invert_cond(cond);
+    }
+    if (need_swap) {
+        TCGv_vec t1;
+        t1 = v1, v1 = v2, v2 = t1;
+        cond = tcg_swap_cond(cond);
+    }
+
+    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
+              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
+
+    return need_inv;
+}
+
+static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
+                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
+{
+    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
+        tcg_gen_not_vec(vece, v0, v0);
+    }
 }
 
 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                        TCGArg a0, ...)
 {
-    g_assert_not_reached();
+    va_list va;
+    TCGv_vec v0, v1, v2;
+
+    va_start(va, a0);
+    v0 = temp_tcgv_vec(arg_temp(a0));
+    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+
+    switch (opc) {
+    case INDEX_op_cmp_vec:
+        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
+        break;
+
+    default:
+        g_assert_not_reached();
+    }
+    va_end(va);
 }
 
 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
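
For reference (not part of the patch): the hardware compares cover only EQ, GT and GTU, so expand_vec_cmp_noinv() rewrites the remaining conditions by swapping operands and/or inverting the result before emitting INDEX_op_cmp_vec. Illustrative expansions, with the instruction then selected by tcg_out_vec_op() in parentheses:

    cmp_vec LT,  v0, v1, v2  ->  cmp_vec GT,  v0, v2, v1                    (VCH  v0, v2, v1)
    cmp_vec NE,  v0, v1, v2  ->  cmp_vec EQ,  v0, v1, v2  + not_vec v0, v0  (VCEQ v0, v1, v2)
    cmp_vec GEU, v0, v1, v2  ->  cmp_vec GTU, v0, v2, v1  + not_vec v0, v0  (VCHL v0, v2, v1)

The trailing not_vec is the tcg_gen_not_vec() call issued by expand_vec_cmp() when expand_vec_cmp_noinv() returns true; since this patch adds no vector NOT opcode, that op is presumably lowered by generic TCG code (e.g. as an XOR with all ones).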
@@ -2839,7 +2985,7 @@ static void query_s390_facilities(void)
      * There is nothing else we currently care about in the 3rd word, so
      * disable VECTOR with one store.
      */
-    if (1 || !(hwcap & HWCAP_S390_VXRS)) {
+    if (!(hwcap & HWCAP_S390_VXRS)) {
         s390_facilities[2] = 0;
     }
 }
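
Aside (not part of the patch): dropping the development-time "1 ||" guard means vector support is now keyed purely off the VXRS bit of the ELF auxiliary vector. A minimal standalone sketch of probing that bit on s390x Linux follows; the fallback value 2048 (bit 11) mirrors current kernel headers and should be treated as an assumption.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_S390_VXRS
#define HWCAP_S390_VXRS 2048    /* bit 11; assumption, check asm/elf.h */
#endif

int main(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);

    printf("vector facility %savailable\n",
           (hwcap & HWCAP_S390_VXRS) ? "" : "not ");
    return 0;
}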