|
12 | 12 | #include <linux/init.h> |
13 | 13 | #include <linux/sched.h> |
14 | 14 | #include <linux/smp.h> |
| 15 | +#include <linux/memblock.h> |
15 | 16 | #include <linux/mm.h> |
16 | 17 | #include <linux/hugetlb.h> |
17 | 18 | #include <linux/export.h> |
| 19 | +#include <linux/sort.h> |
18 | 20 |
|
19 | 21 | #include <asm/cpu.h> |
20 | 22 | #include <asm/cpu-type.h> |
@@ -506,58 +508,95 @@ static int __init set_ntlb(char *str) |
506 | 508 |
|
507 | 509 | __setup("ntlb=", set_ntlb); |
508 | 510 |
|
509 | | -/* Initialise all TLB entries with unique values */ |
510 | | -static void r4k_tlb_uniquify(void) |
| 511 | + |
/*
 * Comparison function for EntryHi VPN fields, for use with sort().
 *
 * VPNs must be ordered exactly as the unsigned "<"/"==" comparisons
 * used when the sorted table is scanned in r4k_tlb_uniquify(), so
 * compare them as unsigned quantities.  A plain signed subtraction
 * would flip sign whenever the difference exceeds LONG_MAX -- which
 * can happen here, as the VPNs retain the region bits (63:62 on
 * 64-bit, bit 31 on 32-bit) -- and would also lean on the
 * implementation-defined right shift of a negative value.
 */
static int r4k_vpn_cmp(const void *a, const void *b)
{
	unsigned long va = *(const unsigned long *)a;
	unsigned long vb = *(const unsigned long *)b;

	/* Overflow-free three-way compare: -1, 0 or 1. */
	return (va > vb) - (va < vb);
}
| 519 | + |
/*
 * Initialise all TLB entries with unique values that do not clash with
 * what we have been handed over and what we'll be using ourselves.
 *
 * Strategy: read back the VPN of every non-wired entry, sort those VPNs,
 * then rewrite each entry with a synthesised UNIQUE_ENTRYHI() value,
 * advancing past any candidate VPN already present in the sorted table
 * so no two entries can ever match the same address.
 *
 * __ref: when called before the slab allocator is up this uses the
 * memblock (init-section) allocator for the scratch VPN table; the
 * reference from non-init code is intentional.
 */
static void __ref r4k_tlb_uniquify(void)
{
	int tlbsize = current_cpu_data.tlbsize;
	bool use_slab = slab_is_available();
	/* Wired entries are left alone; start uniquifying after them. */
	int start = num_wired_entries();
	phys_addr_t tlb_vpn_size;
	unsigned long *tlb_vpns;
	unsigned long vpn_mask;
	int cnt, ent, idx, i;

	/*
	 * Mask selecting the VPN bits of EntryHi: the virtual-address
	 * field from bit 13 up to the implemented VA width, plus the
	 * top region/sign bits (63:62 on 64-bit, bit 31 on 32-bit).
	 */
	vpn_mask = GENMASK(cpu_vmbits - 1, 13);
	vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;

	/* Scratch table holding one recorded VPN per non-wired entry. */
	tlb_vpn_size = tlbsize * sizeof(*tlb_vpns);
	tlb_vpns = (use_slab ?
		    kmalloc(tlb_vpn_size, GFP_KERNEL) :
		    memblock_alloc_raw(tlb_vpn_size, sizeof(*tlb_vpns)));
	if (WARN_ON(!tlb_vpns))
		return;	/* Pray local_flush_tlb_all() is good enough. */

	/* Hardware page-table walker must be off while we poke the TLB. */
	htw_stop();

	/* Pass 1: record the current VPN of every non-wired entry. */
	for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
		unsigned long vpn;

		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		vpn = read_c0_entryhi();
		vpn &= vpn_mask & PAGE_MASK;
		tlb_vpns[cnt] = vpn;

		/* Prevent any large pages from overlapping regular ones. */
		write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}

	/* Sort so pass 2 can merge against the table in one linear scan. */
	sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);

	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/*
	 * Pass 2: rewrite each non-wired entry with UNIQUE_ENTRYHI(ent),
	 * walking 'ent' upward from tlbsize and 'idx' through the sorted
	 * VPN table in lockstep.  A candidate VPN is used only if it is
	 * absent from the table; on a collision the candidate is skipped
	 * (ent++), and table entries below the candidate are consumed
	 * (idx++) until the ordering is decided.
	 */
	idx = 0;
	ent = tlbsize;
	for (i = start; i < tlbsize; i++)
		while (1) {
			unsigned long entryhi, vpn;

			entryhi = UNIQUE_ENTRYHI(ent);
			vpn = entryhi & vpn_mask & PAGE_MASK;

			if (idx >= cnt || vpn < tlb_vpns[idx]) {
				/* No clash possible: write it and move on. */
				write_c0_entryhi(entryhi);
				write_c0_index(i);
				mtc0_tlbw_hazard();
				tlb_write_indexed();
				ent++;
				break;
			} else if (vpn == tlb_vpns[idx]) {
				/* Candidate clashes with a recorded VPN. */
				ent++;
			} else {
				/* Table entry is below the candidate. */
				idx++;
			}
		}

	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb();
	/* Free the scratch table with the allocator that produced it. */
	if (use_slab)
		kfree(tlb_vpns);
	else
		memblock_free(tlb_vpns, tlb_vpn_size);
}
562 | 601 |
|
563 | 602 | /* |
@@ -600,6 +639,7 @@ static void r4k_tlb_configure(void) |
600 | 639 |
|
601 | 640 | /* From this point on the ARC firmware is dead. */ |
602 | 641 | r4k_tlb_uniquify(); |
| 642 | + local_flush_tlb_all(); |
603 | 643 |
|
604 | 644 | /* Did I tell you that ARC SUCKS? */ |
605 | 645 | } |
|
0 commit comments