Skip to content

Commit 6732c0e

Browse files
committed
ARC: mm: retire support for aliasing VIPT D$
Legacy ARC700 processors (the first generation of MMU-enabled ARC cores) had VIPT caches which could be configured such that they could alias. Corresponding support in the kernel (with all the obnoxious cache-flush overhead) was added to the ARC port 10 years ago to support one silicon. That is long bygone and we can let it RIP. Cc: Matthew Wilcox (Oracle) <[email protected]> Signed-off-by: Vineet Gupta <[email protected]>
1 parent 3a02ec2 commit 6732c0e

File tree

5 files changed

+14
-207
lines changed

5 files changed

+14
-207
lines changed

arch/arc/Kconfig

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,6 @@ config ARC
4949
select OF
5050
select OF_EARLY_FLATTREE
5151
select PCI_SYSCALL if PCI
52-
select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
5352
select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
5453
select TRACE_IRQFLAGS_SUPPORT
5554

@@ -232,10 +231,6 @@ config ARC_CACHE_PAGES
232231
Note that Global I/D ENABLE + Per Page DISABLE works but corollary
233232
Global DISABLE + Per Page ENABLE won't work
234233

235-
config ARC_CACHE_VIPT_ALIASING
236-
bool "Support VIPT Aliasing D$"
237-
depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
238-
239234
endif #ARC_CACHE
240235

241236
config ARC_HAS_ICCM

arch/arc/include/asm/cacheflush.h

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -44,60 +44,17 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
4444

4545
#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
4646

47-
#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
48-
4947
#define flush_cache_mm(mm) /* called on munmap/exit */
5048
#define flush_cache_range(mm, u_vstart, u_vend)
5149
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
5250

53-
#else /* VIPT aliasing dcache */
54-
55-
/* To clear out stale userspace mappings */
56-
void flush_cache_mm(struct mm_struct *mm);
57-
void flush_cache_range(struct vm_area_struct *vma,
58-
unsigned long start,unsigned long end);
59-
void flush_cache_page(struct vm_area_struct *vma,
60-
unsigned long user_addr, unsigned long page);
61-
62-
/*
63-
* To make sure that userspace mapping is flushed to memory before
64-
* get_user_pages() uses a kernel mapping to access the page
65-
*/
66-
#define ARCH_HAS_FLUSH_ANON_PAGE
67-
void flush_anon_page(struct vm_area_struct *vma,
68-
struct page *page, unsigned long u_vaddr);
69-
70-
#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
71-
7251
/*
7352
* A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
7453
* This works around some PIO based drivers which don't call flush_dcache_page
7554
* to record that they dirtied the dcache
7655
*/
7756
#define PG_dc_clean PG_arch_1
7857

79-
#define CACHE_COLORS_NUM 4
80-
#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
81-
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
82-
83-
/*
84-
* Simple wrapper over config option
85-
* Bootup code ensures that hardware matches kernel configuration
86-
*/
87-
static inline int cache_is_vipt_aliasing(void)
88-
{
89-
return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
90-
}
91-
92-
/*
93-
* checks if two addresses (after page aligning) index into same cache set
94-
*/
95-
#define addr_not_cache_congruent(addr1, addr2) \
96-
({ \
97-
cache_is_vipt_aliasing() ? \
98-
(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
99-
})
100-
10158
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
10259
do { \
10360
memcpy(dst, src, len); \

arch/arc/mm/cache.c

Lines changed: 6 additions & 130 deletions
Original file line numberDiff line numberDiff line change
@@ -145,10 +145,9 @@ int arc_cache_mumbojumbo(int c, char *buf, int len)
145145
p_dc->sz_k = 1 << (dbcr.sz - 1);
146146

147147
n += scnprintf(buf + n, len - n,
148-
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
148+
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
149149
p_dc->sz_k, assoc, p_dc->line_len,
150150
vipt ? "VIPT" : "PIPT",
151-
p_dc->colors > 1 ? " aliasing" : "",
152151
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
153152

154153
slc_chk:
@@ -703,51 +702,10 @@ static inline void arc_slc_enable(void)
703702
* Exported APIs
704703
*/
705704

706-
/*
707-
* Handle cache congruency of kernel and userspace mappings of page when kernel
708-
* writes-to/reads-from
709-
*
710-
* The idea is to defer flushing of kernel mapping after a WRITE, possible if:
711-
* -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
712-
* -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
713-
* -In SMP, if hardware caches are coherent
714-
*
715-
* There's a corollary case, where kernel READs from a userspace mapped page.
716-
* If the U-mapping is not congruent to K-mapping, former needs flushing.
717-
*/
718705
void flush_dcache_folio(struct folio *folio)
719706
{
720-
struct address_space *mapping;
721-
722-
if (!cache_is_vipt_aliasing()) {
723-
clear_bit(PG_dc_clean, &folio->flags);
724-
return;
725-
}
726-
727-
/* don't handle anon pages here */
728-
mapping = folio_flush_mapping(folio);
729-
if (!mapping)
730-
return;
731-
732-
/*
733-
* pagecache page, file not yet mapped to userspace
734-
* Make a note that K-mapping is dirty
735-
*/
736-
if (!mapping_mapped(mapping)) {
737-
clear_bit(PG_dc_clean, &folio->flags);
738-
} else if (folio_mapped(folio)) {
739-
/* kernel reading from page with U-mapping */
740-
phys_addr_t paddr = (unsigned long)folio_address(folio);
741-
unsigned long vaddr = folio_pos(folio);
742-
743-
/*
744-
* vaddr is not actually the virtual address, but is
745-
* congruent to every user mapping.
746-
*/
747-
if (addr_not_cache_congruent(paddr, vaddr))
748-
__flush_dcache_pages(paddr, vaddr,
749-
folio_nr_pages(folio));
750-
}
707+
clear_bit(PG_dc_clean, &folio->flags);
708+
return;
751709
}
752710
EXPORT_SYMBOL(flush_dcache_folio);
753711

@@ -921,91 +879,18 @@ noinline void flush_cache_all(void)
921879

922880
}
923881

924-
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
925-
926-
void flush_cache_mm(struct mm_struct *mm)
927-
{
928-
flush_cache_all();
929-
}
930-
931-
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
932-
unsigned long pfn)
933-
{
934-
phys_addr_t paddr = pfn << PAGE_SHIFT;
935-
936-
u_vaddr &= PAGE_MASK;
937-
938-
__flush_dcache_pages(paddr, u_vaddr, 1);
939-
940-
if (vma->vm_flags & VM_EXEC)
941-
__inv_icache_pages(paddr, u_vaddr, 1);
942-
}
943-
944-
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
945-
unsigned long end)
946-
{
947-
flush_cache_all();
948-
}
949-
950-
void flush_anon_page(struct vm_area_struct *vma, struct page *page,
951-
unsigned long u_vaddr)
952-
{
953-
/* TBD: do we really need to clear the kernel mapping */
954-
__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
955-
__flush_dcache_pages((phys_addr_t)page_address(page),
956-
(phys_addr_t)page_address(page), 1);
957-
958-
}
959-
960-
#endif
961-
962882
void copy_user_highpage(struct page *to, struct page *from,
963883
unsigned long u_vaddr, struct vm_area_struct *vma)
964884
{
965885
struct folio *src = page_folio(from);
966886
struct folio *dst = page_folio(to);
967887
void *kfrom = kmap_atomic(from);
968888
void *kto = kmap_atomic(to);
969-
int clean_src_k_mappings = 0;
970-
971-
/*
972-
* If SRC page was already mapped in userspace AND it's U-mapping is
973-
* not congruent with K-mapping, sync former to physical page so that
974-
* K-mapping in memcpy below, sees the right data
975-
*
976-
* Note that while @u_vaddr refers to DST page's userspace vaddr, it is
977-
* equally valid for SRC page as well
978-
*
979-
* For !VIPT cache, all of this gets compiled out as
980-
* addr_not_cache_congruent() is 0
981-
*/
982-
if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
983-
__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
984-
clean_src_k_mappings = 1;
985-
}
986889

987890
copy_page(kto, kfrom);
988891

989-
/*
990-
* Mark DST page K-mapping as dirty for a later finalization by
991-
* update_mmu_cache(). Although the finalization could have been done
992-
* here as well (given that both vaddr/paddr are available).
993-
* But update_mmu_cache() already has code to do that for other
994-
* non copied user pages (e.g. read faults which wire in pagecache page
995-
* directly).
996-
*/
997892
clear_bit(PG_dc_clean, &dst->flags);
998-
999-
/*
1000-
* if SRC was already usermapped and non-congruent to kernel mapping
1001-
* sync the kernel mapping back to physical page
1002-
*/
1003-
if (clean_src_k_mappings) {
1004-
__flush_dcache_pages((unsigned long)kfrom,
1005-
(unsigned long)kfrom, 1);
1006-
} else {
1007-
clear_bit(PG_dc_clean, &src->flags);
1008-
}
893+
clear_bit(PG_dc_clean, &src->flags);
1009894

1010895
kunmap_atomic(kto);
1011896
kunmap_atomic(kfrom);
@@ -1140,17 +1025,8 @@ static noinline void __init arc_cache_init_master(void)
11401025
dc->line_len, L1_CACHE_BYTES);
11411026

11421027
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
1143-
if (is_isa_arcompact()) {
1144-
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
1145-
1146-
if (dc->colors > 1) {
1147-
if (!handled)
1148-
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
1149-
if (CACHE_COLORS_NUM != dc->colors)
1150-
panic("CACHE_COLORS_NUM not optimized for config\n");
1151-
} else if (handled && dc->colors == 1) {
1152-
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
1153-
}
1028+
if (is_isa_arcompact() && dc->colors > 1) {
1029+
panic("Aliasing VIPT cache not supported\n");
11541030
}
11551031
}
11561032

arch/arc/mm/mmap.c

Lines changed: 3 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,6 @@
1414

1515
#include <asm/cacheflush.h>
1616

17-
#define COLOUR_ALIGN(addr, pgoff) \
18-
((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
19-
(((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
20-
2117
/*
2218
* Ensure that shared mappings are correctly aligned to
2319
* avoid aliasing issues with VIPT caches.
@@ -31,21 +27,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3127
{
3228
struct mm_struct *mm = current->mm;
3329
struct vm_area_struct *vma;
34-
int do_align = 0;
35-
int aliasing = cache_is_vipt_aliasing();
3630
struct vm_unmapped_area_info info;
3731

38-
/*
39-
* We only need to do colour alignment if D cache aliases.
40-
*/
41-
if (aliasing)
42-
do_align = filp || (flags & MAP_SHARED);
43-
4432
/*
4533
* We enforce the MAP_FIXED case.
4634
*/
4735
if (flags & MAP_FIXED) {
48-
if (aliasing && flags & MAP_SHARED &&
36+
if (flags & MAP_SHARED &&
4937
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
5038
return -EINVAL;
5139
return addr;
@@ -55,10 +43,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
5543
return -ENOMEM;
5644

5745
if (addr) {
58-
if (do_align)
59-
addr = COLOUR_ALIGN(addr, pgoff);
60-
else
61-
addr = PAGE_ALIGN(addr);
46+
addr = PAGE_ALIGN(addr);
6247

6348
vma = find_vma(mm, addr);
6449
if (TASK_SIZE - len >= addr &&
@@ -70,7 +55,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
7055
info.length = len;
7156
info.low_limit = mm->mmap_base;
7257
info.high_limit = TASK_SIZE;
73-
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
58+
info.align_mask = 0;
7459
info.align_offset = pgoff << PAGE_SHIFT;
7560
return vm_unmapped_area(&info);
7661
}

arch/arc/mm/tlb.c

Lines changed: 5 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -478,21 +478,15 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
478478

479479
create_tlb(vma, vaddr, ptep);
480480

481-
if (page == ZERO_PAGE(0)) {
481+
if (page == ZERO_PAGE(0))
482482
return;
483-
}
484483

485484
/*
486-
* Exec page : Independent of aliasing/page-color considerations,
487-
* since icache doesn't snoop dcache on ARC, any dirty
488-
* K-mapping of a code page needs to be wback+inv so that
489-
* icache fetch by userspace sees code correctly.
490-
* !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
491-
* so userspace sees the right data.
492-
* (Avoids the flush for Non-exec + congruent mapping case)
485+
* For executable pages, since icache doesn't snoop dcache, any
486+
* dirty K-mapping of a code page needs to be wback+inv so that
487+
* icache fetch by userspace sees code correctly.
493488
*/
494-
if ((vma->vm_flags & VM_EXEC) ||
495-
addr_not_cache_congruent(paddr, vaddr)) {
489+
if (vma->vm_flags & VM_EXEC) {
496490
struct folio *folio = page_folio(page);
497491
int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
498492
if (dirty) {

0 commit comments

Comments
 (0)