 #define TCG_CT_CONST_ZERO  0x800
 
 #define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 16)
+#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
+
 /*
  * For softmmu, we need to avoid conflicts with the first 3
  * argument registers to perform the tlb lookup, and to call
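The two masks above describe which TCG register indices each class occupies: ALL_GENERAL_REGS covers indices 0-15 and the new ALL_VECTOR_REGS covers indices 32-63. A minimal standalone illustration (not part of the patch), assuming QEMU's usual MAKE_64BIT_MASK definition from include/qemu/bitops.h:

#include <stdio.h>

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    /* Bits 0..15: the sixteen general registers %r0-%r15. */
    printf("ALL_GENERAL_REGS = 0x%016llx\n", MAKE_64BIT_MASK(0, 16));
    /* Bits 32..63: the thirty-two vector registers %v0-%v31. */
    printf("ALL_VECTOR_REGS  = 0x%016llx\n", MAKE_64BIT_MASK(32, 32));
    return 0;
}

The second value, 0xffffffff00000000, is the same constant stored into tcg_target_available_regs for TCG_TYPE_V64 and TCG_TYPE_V128 in the tcg_target_init() hunk further down.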
@@ -268,8 +270,13 @@ typedef enum S390Opcode {
 
 #ifdef CONFIG_DEBUG_TCG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
-    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
-    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
+    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
+    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
+    "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
+    "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
+    "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
 };
 #endif
 
@@ -295,6 +302,32 @@ static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_R4,
     TCG_REG_R3,
     TCG_REG_R2,
+
+    /* V8-V15 are call saved, and omitted. */
+    TCG_REG_V0,
+    TCG_REG_V1,
+    TCG_REG_V2,
+    TCG_REG_V3,
+    TCG_REG_V4,
+    TCG_REG_V5,
+    TCG_REG_V6,
+    TCG_REG_V7,
+    TCG_REG_V16,
+    TCG_REG_V17,
+    TCG_REG_V18,
+    TCG_REG_V19,
+    TCG_REG_V20,
+    TCG_REG_V21,
+    TCG_REG_V22,
+    TCG_REG_V23,
+    TCG_REG_V24,
+    TCG_REG_V25,
+    TCG_REG_V26,
+    TCG_REG_V27,
+    TCG_REG_V28,
+    TCG_REG_V29,
+    TCG_REG_V30,
+    TCG_REG_V31,
 };
 
 static const int tcg_target_call_iarg_regs[] = {
@@ -377,7 +410,7 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
 #endif
 
 static const tcg_insn_unit *tb_ret_addr;
-uint64_t s390_facilities[1];
+uint64_t s390_facilities[3];
 
 static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                         intptr_t value, intptr_t addend)
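Growing s390_facilities from one to three 64-bit words makes room for facility bits up to 191, which matters because the vector facility is bit 129. A sketch of how a bit that high might be tested, assuming STFLE's numbering where facility bit 0 is the most significant bit of doubleword 0; the real HAVE_FACILITY macro lives in tcg-target.h and is not part of this diff:

#include <stdbool.h>
#include <stdint.h>

extern uint64_t s390_facilities[3];

/* Hypothetical helper: test facility bit 'nr' (e.g. 129 for VECTOR). */
static inline bool have_facility_bit(unsigned nr)
{
    return (s390_facilities[nr / 64] >> (63 - (nr % 64))) & 1;
}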
@@ -2293,6 +2326,42 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     }
 }
 
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+                            TCGReg dst, TCGReg src)
+{
+    g_assert_not_reached();
+}
+
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg dst, TCGReg base, intptr_t offset)
+{
+    g_assert_not_reached();
+}
+
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg dst, int64_t val)
+{
+    g_assert_not_reached();
+}
+
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+                           unsigned vecl, unsigned vece,
+                           const TCGArg *args, const int *const_args)
+{
+    g_assert_not_reached();
+}
+
+int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
+{
+    return 0;
+}
+
+void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
+                       TCGArg a0, ...)
+{
+    g_assert_not_reached();
+}
+
 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
 {
     switch (op) {
@@ -2433,11 +2502,34 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
                 ? C_O2_I4(r, r, 0, 1, rA, r)
                 : C_O2_I4(r, r, 0, 1, r, r));
 
+    case INDEX_op_st_vec:
+        return C_O0_I2(v, r);
+    case INDEX_op_ld_vec:
+    case INDEX_op_dupm_vec:
+        return C_O1_I1(v, r);
+    case INDEX_op_dup_vec:
+        return C_O1_I1(v, vr);
+    case INDEX_op_add_vec:
+    case INDEX_op_sub_vec:
+    case INDEX_op_and_vec:
+    case INDEX_op_or_vec:
+    case INDEX_op_xor_vec:
+    case INDEX_op_cmp_vec:
+        return C_O1_I2(v, v, v);
+
     default:
         g_assert_not_reached();
     }
 }
 
+/*
+ * Mainline glibc added HWCAP_S390_VX before it was kernel abi.
+ * Some distros have fixed this up locally, others have not.
+ */
+#ifndef HWCAP_S390_VXRS
+#define HWCAP_S390_VXRS 2048
+#endif
+
 static void query_s390_facilities(void)
 {
     unsigned long hwcap = qemu_getauxval(AT_HWCAP);
@@ -2452,6 +2544,16 @@ static void query_s390_facilities(void)
         asm volatile(".word 0xb2b0,0x1000"
                      : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
     }
+
+    /*
+     * Use of vector registers requires os support beyond the facility bit.
+     * If the kernel does not advertise support, disable the facility bits.
+     * There is nothing else we currently care about in the 3rd word, so
+     * disable VECTOR with one store.
+     */
+    if (1 || !(hwcap & HWCAP_S390_VXRS)) {
+        s390_facilities[2] = 0;
+    }
 }
 
 static void tcg_target_init(TCGContext *s)
@@ -2460,6 +2562,10 @@ static void tcg_target_init(TCGContext *s)
 
     tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
     tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
+    if (HAVE_FACILITY(VECTOR)) {
+        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
+        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
+    }
 
     tcg_target_call_clobber_regs = 0;
     tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
@@ -2474,6 +2580,31 @@ static void tcg_target_init(TCGContext *s)
     /* The return register can be considered call-clobbered. */
     tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
 
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
+    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
+
     s->reserved_regs = 0;
     tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
     /* XXX many insns can't be used with R0, so we better avoid it for now */
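The placeholder zeros at indices 16-31 of tcg_target_reg_names, together with ALL_VECTOR_REGS == MAKE_64BIT_MASK(32, 32), imply the register numbering the patch relies on: general registers at indices 0-15 and vector registers at indices 32-63. A sketch of that layout (an assumption here; the actual enum is in the companion tcg-target.h change, which this diff does not show):

typedef enum {
    TCG_REG_R0 = 0,    /* %r1-%r15 follow at indices 1-15 */
    /* indices 16-31 are unused placeholders */
    TCG_REG_V0 = 32,   /* %v1-%v31 follow at indices 33-63 */
} S390RegisterIndexSketch;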