|
12 | 12 | #include <linux/init.h> |
13 | 13 | #include <linux/sched.h> |
14 | 14 | #include <linux/smp.h> |
| 15 | +#include <linux/memblock.h> |
15 | 16 | #include <linux/mm.h> |
16 | 17 | #include <linux/hugetlb.h> |
17 | 18 | #include <linux/export.h> |
| 19 | +#include <linux/sort.h> |
18 | 20 |
|
19 | 21 | #include <asm/cpu.h> |
20 | 22 | #include <asm/cpu-type.h> |
@@ -508,58 +510,95 @@ static int __init set_ntlb(char *str) |
508 | 510 |
|
509 | 511 | __setup("ntlb=", set_ntlb); |
510 | 512 |
|
511 | | -/* Initialise all TLB entries with unique values */ |
512 | | -static void r4k_tlb_uniquify(void) |
| 513 | + |
/*
 * Comparison function for EntryHi VPN fields, for use with sort().
 *
 * Returns negative/zero/positive for a < b / a == b / a > b.
 *
 * A plain subtraction is deliberately not used: the operands are full
 * unsigned longs (with the R4k region bits set high, e.g. 3ULL << 62 on
 * 64-bit), so their difference may exceed LONG_MAX and wrap, producing
 * the wrong sign — which would leave the VPN table mis-sorted and let
 * the merge scan in r4k_tlb_uniquify() miss a clashing entry.
 */
static int r4k_vpn_cmp(const void *a, const void *b)
{
	unsigned long va = *(const unsigned long *)a;
	unsigned long vb = *(const unsigned long *)b;

	return (va > vb) - (va < vb);
}
| 521 | + |
/*
 * Initialise all TLB entries with unique values that do not clash with
 * what we have been handed over and what we'll be using ourselves.
 *
 * Strategy: snapshot the VPNs of all non-wired entries left behind by
 * the firmware, sort them, then refill those slots with synthetic
 * UNIQUE_ENTRYHI() values chosen (via a merge scan against the sorted
 * snapshot) so that no new VPN duplicates an existing one.  Duplicate
 * VPNs in the TLB can raise a machine check on some cores.
 *
 * __ref: may call either slab or memblock allocators depending on how
 * early we are booting (slab_is_available() decides).
 */
static void __ref r4k_tlb_uniquify(void)
{
	int tlbsize = current_cpu_data.tlbsize;
	bool use_slab = slab_is_available();
	int start = num_wired_entries();	/* skip wired entries */
	phys_addr_t tlb_vpn_size;
	unsigned long *tlb_vpns;
	unsigned long vpn_mask;
	int cnt, ent, idx, i;

	/*
	 * Bits of EntryHi that hold the VPN: the page-number field plus
	 * the address-region selector bits (R field bits 63:62 on 64-bit,
	 * bit 31 on 32-bit).
	 */
	vpn_mask = GENMASK(cpu_vmbits - 1, 13);
	vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;

	/* Scratch array for one VPN per non-wired TLB entry. */
	tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
	tlb_vpns = (use_slab ?
		    kmalloc(tlb_vpn_size, GFP_KERNEL) :
		    memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
	if (WARN_ON(!tlb_vpns))
		return;	/* Pray local_flush_tlb_all() is good enough. */

	htw_stop();

	/* Pass 1: read back every non-wired entry and record its VPN. */
	for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
		unsigned long vpn;

		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		vpn = read_c0_entryhi();
		vpn &= vpn_mask & PAGE_MASK;
		tlb_vpns[cnt] = vpn;

		/* Prevent any large pages from overlapping regular ones. */
		write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}

	/* Sort so pass 2 can merge-scan for collisions in O(n). */
	sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);

	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/*
	 * Pass 2: rewrite entries start..tlbsize-1 with invalid unique
	 * VPNs.  'ent' generates candidate UNIQUE_ENTRYHI values; a
	 * candidate is only used once it is known not to match any
	 * recorded VPN (idx walks the sorted snapshot in step).
	 */
	idx = 0;
	ent = tlbsize;
	for (i = start; i < tlbsize; i++)
		while (1) {
			unsigned long entryhi, vpn;

			entryhi = UNIQUE_ENTRYHI(ent);
			vpn = entryhi & vpn_mask & PAGE_MASK;

			if (idx >= cnt || vpn < tlb_vpns[idx]) {
				/* No clash possible: commit this entry. */
				write_c0_entryhi(entryhi);
				write_c0_index(i);
				mtc0_tlbw_hazard();
				tlb_write_indexed();
				ent++;
				break;
			} else if (vpn == tlb_vpns[idx]) {
				/* Candidate clashes: try the next one. */
				ent++;
			} else {
				/* Candidate is past this VPN: advance scan. */
				idx++;
			}
		}

	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb();
	/* Free with the allocator that provided the buffer. */
	if (use_slab)
		kfree(tlb_vpns);
	else
		memblock_free(tlb_vpns, tlb_vpn_size);
}
564 | 603 |
|
565 | 604 | /* |
@@ -602,6 +641,7 @@ static void r4k_tlb_configure(void) |
602 | 641 |
|
603 | 642 | /* From this point on the ARC firmware is dead. */ |
604 | 643 | r4k_tlb_uniquify(); |
| 644 | + local_flush_tlb_all(); |
605 | 645 |
|
606 | 646 | /* Did I tell you that ARC SUCKS? */ |
607 | 647 | } |
|
0 commit comments