 #include <asm/entry-common.h>
 #include <asm/hwprobe.h>
 #include <asm/cpufeature.h>
+#include <asm/vector.h>
 
 #define INSN_MATCH_LB			0x3
 #define INSN_MASK_LB			0x707f
@@ -322,12 +323,37 @@ union reg_data {
 	u64 data_u64;
 };
 
-static bool unaligned_ctl __read_mostly;
-
 /* sysctl hooks */
 int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
 
-int handle_misaligned_load(struct pt_regs *regs)
+#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
+static int handle_vector_misaligned_load(struct pt_regs *regs)
+{
+	unsigned long epc = regs->epc;
+	unsigned long insn;
+
+	if (get_insn(regs, epc, &insn))
+		return -1;
+
+	/* Only return 0 when in check_vector_unaligned_access_emulated */
+	if (*this_cpu_ptr(&vector_misaligned_access) == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) {
+		*this_cpu_ptr(&vector_misaligned_access) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
+		regs->epc = epc + INSN_LEN(insn);
+		return 0;
+	}
+
+	/* If vector instruction we don't emulate it yet */
+	regs->epc = epc;
+	return -1;
+}
+#else
+static int handle_vector_misaligned_load(struct pt_regs *regs)
+{
+	return -1;
+}
+#endif
+
+static int handle_scalar_misaligned_load(struct pt_regs *regs)
 {
 	union reg_data val;
 	unsigned long epc = regs->epc;
@@ -435,7 +461,7 @@ int handle_misaligned_load(struct pt_regs *regs)
 	return 0;
 }
 
-int handle_misaligned_store(struct pt_regs *regs)
+static int handle_scalar_misaligned_store(struct pt_regs *regs)
 {
 	union reg_data val;
 	unsigned long epc = regs->epc;
@@ -526,6 +552,91 @@ int handle_misaligned_store(struct pt_regs *regs)
 	return 0;
 }
 
+int handle_misaligned_load(struct pt_regs *regs)
+{
+	unsigned long epc = regs->epc;
+	unsigned long insn;
+
+	if (IS_ENABLED(CONFIG_RISCV_VECTOR_MISALIGNED)) {
+		if (get_insn(regs, epc, &insn))
+			return -1;
+
+		if (insn_is_vector(insn))
+			return handle_vector_misaligned_load(regs);
+	}
+
+	if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED))
+		return handle_scalar_misaligned_load(regs);
+
+	return -1;
+}
+
+int handle_misaligned_store(struct pt_regs *regs)
+{
+	if (IS_ENABLED(CONFIG_RISCV_SCALAR_MISALIGNED))
+		return handle_scalar_misaligned_store(regs);
+
+	return -1;
+}
+
+#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
+void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused)
+{
+	long *mas_ptr = this_cpu_ptr(&vector_misaligned_access);
+	unsigned long tmp_var;
+
+	*mas_ptr = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
+
+	kernel_vector_begin();
+	/*
+	 * In pre-13.0.0 versions of GCC, vector registers cannot appear in
+	 * the clobber list. This inline asm clobbers v0, but since we do not
+	 * currently build the kernel with V enabled, the v0 clobber arg is not
+	 * needed (as the compiler will not emit vector code itself). If the kernel
+	 * is changed to build with V enabled, the clobber arg will need to be
+	 * added here.
+	 */
+	__asm__ __volatile__ (
+		".balign 4\n\t"
+		".option push\n\t"
+		".option arch, +zve32x\n\t"
+		"	vsetivli zero, 1, e16, m1, ta, ma\n\t"	// Vectors of 16b
+		"	vle16.v v0, (%[ptr])\n\t"		// Load bytes
+		".option pop\n\t"
+		: : [ptr] "r" ((u8 *)&tmp_var + 1));
+	kernel_vector_end();
+}
+
+bool check_vector_unaligned_access_emulated_all_cpus(void)
+{
+	int cpu;
+
+	if (!has_vector()) {
+		for_each_online_cpu(cpu)
+			per_cpu(vector_misaligned_access, cpu) = RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED;
+		return false;
+	}
+
+	schedule_on_each_cpu(check_vector_unaligned_access_emulated);
+
+	for_each_online_cpu(cpu)
+		if (per_cpu(vector_misaligned_access, cpu)
+		    == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
+			return false;
+
+	return true;
+}
+#else
+bool check_vector_unaligned_access_emulated_all_cpus(void)
+{
+	return false;
+}
+#endif
+
+#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
+
+static bool unaligned_ctl __read_mostly;
+
 void check_unaligned_access_emulated(struct work_struct *work __always_unused)
 {
 	int cpu = smp_processor_id();
@@ -574,3 +685,9 @@ bool unaligned_ctl_available(void)
 {
 	return unaligned_ctl;
 }
+#else
+bool check_unaligned_access_emulated_all_cpus(void)
+{
+	return false;
+}
+#endif
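
For context, the per-CPU vector_misaligned_access state maintained above is ultimately reported to userspace through the RISC-V hwprobe interface. Below is a minimal, hedged sketch of how a userspace program might read it. It assumes the hwprobe key RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF provided elsewhere in this series (it does not appear in these hunks) and uses the raw __NR_riscv_hwprobe syscall; only RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN and _UNSUPPORTED are shown in the patch itself.

/*
 * Userspace sketch, not part of the patch. Assumes the uapi <asm/hwprobe.h>
 * from this series defines RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF and the
 * RISCV_HWPROBE_MISALIGNED_VECTOR_* values.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF,
	};

	/* cpusetsize == 0 and cpus == NULL query all online CPUs */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0)) {
		perror("riscv_hwprobe");
		return 1;
	}

	if (pair.value == RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED)
		printf("misaligned vector accesses are not supported\n");
	else if (pair.value == RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN)
		printf("misaligned vector access support is unknown\n");
	else
		printf("misaligned vector accesses handled, perf class %llu\n",
		       (unsigned long long)pair.value);
	return 0;
}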