@@ -145,10 +145,9 @@ int arc_cache_mumbojumbo(int c, char *buf, int len)
 	p_dc->sz_k = 1 << (dbcr.sz - 1);
 
 	n += scnprintf(buf + n, len - n,
-		       "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
+		       "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
		       p_dc->sz_k, assoc, p_dc->line_len,
		       vipt ? "VIPT" : "PIPT",
-		       p_dc->colors > 1 ? " aliasing" : "",
		       IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
 
 slc_chk:
@@ -703,51 +702,10 @@ static inline void arc_slc_enable(void)
  * Exported APIs
  */
 
-/*
- * Handle cache congruency of kernel and userspace mappings of page when kernel
- * writes-to/reads-from
- *
- * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
- *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
- *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
- *  -In SMP, if hardware caches are coherent
- *
- * There's a corollary case, where kernel READs from a userspace mapped page.
- * If the U-mapping is not congruent to K-mapping, former needs flushing.
- */
 void flush_dcache_folio(struct folio *folio)
 {
-	struct address_space *mapping;
-
-	if (!cache_is_vipt_aliasing()) {
-		clear_bit(PG_dc_clean, &folio->flags);
-		return;
-	}
-
-	/* don't handle anon pages here */
-	mapping = folio_flush_mapping(folio);
-	if (!mapping)
-		return;
-
-	/*
-	 * pagecache page, file not yet mapped to userspace
-	 * Make a note that K-mapping is dirty
-	 */
-	if (!mapping_mapped(mapping)) {
-		clear_bit(PG_dc_clean, &folio->flags);
-	} else if (folio_mapped(folio)) {
-		/* kernel reading from page with U-mapping */
-		phys_addr_t paddr = (unsigned long)folio_address(folio);
-		unsigned long vaddr = folio_pos(folio);
-
-		/*
-		 * vaddr is not actually the virtual address, but is
-		 * congruent to every user mapping.
-		 */
-		if (addr_not_cache_congruent(paddr, vaddr))
-			__flush_dcache_pages(paddr, vaddr,
-					     folio_nr_pages(folio));
-	}
+	clear_bit(PG_dc_clean, &folio->flags);
+	return;
 }
 EXPORT_SYMBOL(flush_dcache_folio);
@@ -921,91 +879,18 @@ noinline void flush_cache_all(void)
 }
 
-#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
-
-void flush_cache_mm(struct mm_struct *mm)
-{
-	flush_cache_all();
-}
-
-void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
-		      unsigned long pfn)
-{
-	phys_addr_t paddr = pfn << PAGE_SHIFT;
-
-	u_vaddr &= PAGE_MASK;
-
-	__flush_dcache_pages(paddr, u_vaddr, 1);
-
-	if (vma->vm_flags & VM_EXEC)
-		__inv_icache_pages(paddr, u_vaddr, 1);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
-{
-	flush_cache_all();
-}
-
-void flush_anon_page(struct vm_area_struct *vma, struct page *page,
-		     unsigned long u_vaddr)
-{
-	/* TBD: do we really need to clear the kernel mapping */
-	__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
-	__flush_dcache_pages((phys_addr_t)page_address(page),
-			     (phys_addr_t)page_address(page), 1);
-
-}
-
-#endif
-
 void copy_user_highpage(struct page *to, struct page *from,
 	unsigned long u_vaddr, struct vm_area_struct *vma)
 {
 	struct folio *src = page_folio(from);
 	struct folio *dst = page_folio(to);
 	void *kfrom = kmap_atomic(from);
 	void *kto = kmap_atomic(to);
-	int clean_src_k_mappings = 0;
-
-	/*
-	 * If SRC page was already mapped in userspace AND it's U-mapping is
-	 * not congruent with K-mapping, sync former to physical page so that
-	 * K-mapping in memcpy below, sees the right data
-	 *
-	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
-	 * equally valid for SRC page as well
-	 *
-	 * For !VIPT cache, all of this gets compiled out as
-	 * addr_not_cache_congruent() is 0
-	 */
-	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
-		__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
-		clean_src_k_mappings = 1;
-	}
 
 	copy_page(kto, kfrom);
 
-	/*
-	 * Mark DST page K-mapping as dirty for a later finalization by
-	 * update_mmu_cache(). Although the finalization could have been done
-	 * here as well (given that both vaddr/paddr are available).
-	 * But update_mmu_cache() already has code to do that for other
-	 * non copied user pages (e.g. read faults which wire in pagecache page
-	 * directly).
-	 */
 	clear_bit(PG_dc_clean, &dst->flags);
-
-	/*
-	 * if SRC was already usermapped and non-congruent to kernel mapping
-	 * sync the kernel mapping back to physical page
-	 */
-	if (clean_src_k_mappings) {
-		__flush_dcache_pages((unsigned long)kfrom,
-				     (unsigned long)kfrom, 1);
-	} else {
-		clear_bit(PG_dc_clean, &src->flags);
-	}
+	clear_bit(PG_dc_clean, &src->flags);
 
 	kunmap_atomic(kto);
 	kunmap_atomic(kfrom);
@@ -1140,17 +1025,8 @@ static noinline void __init arc_cache_init_master(void)
 			      dc->line_len, L1_CACHE_BYTES);
 
 	/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
-	if (is_isa_arcompact()) {
-		int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
-
-		if (dc->colors > 1) {
-			if (!handled)
-				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-			if (CACHE_COLORS_NUM != dc->colors)
-				panic("CACHE_COLORS_NUM not optimized for config\n");
-		} else if (handled && dc->colors == 1) {
-			panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-		}
+	if (is_isa_arcompact() && dc->colors > 1) {
+		panic("Aliasing VIPT cache not supported\n");
 	}
 }
0 commit comments