@@ -976,14 +976,15 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
 	return 0;
 }
 
-
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 {
 	if (radix_enabled())
 		return __vmemmap_can_optimize(altmap, pgmap);
 
 	return false;
 }
+#endif
 
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 				unsigned long addr, unsigned long next)
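
The new `#ifdef` pairs this radix override with a generic stub so that only one definition of `vmemmap_can_optimize()` exists per configuration. A minimal sketch of that Kconfig-guarded split (the exact header location and wording are an assumption, not verbatim kernel source):

```c
/* Sketch only: the conventional declaration/stub split for a
 * Kconfig-gated arch override; not verbatim kernel source.
 */
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
/* The architecture supplies the real implementation, e.g. the radix
 * version above, which defers to __vmemmap_can_optimize(). */
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap);
#else
/* With the option off, vmemmap optimization is never attempted. */
static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
					struct dev_pagemap *pgmap)
{
	return false;
}
#endif
```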
@@ -1120,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node,
 	pmd_t *pmd;
 	pte_t *pte;
 
+	/*
+	 * Make sure we align the start vmemmap addr so that we calculate
+	 * the correct start_pfn in the altmap boundary check, to decide
+	 * whether to use altmap or RAM based backing memory allocation.
+	 * The address also needs to be aligned for the set_pte operation.
+	 *
+	 * If the start addr is already PMD_SIZE aligned we will try to use
+	 * a PMD mapping. We don't want to be too aggressive here because
+	 * that will cause more allocations in RAM. So only if the namespace
+	 * vmemmap start addr is PMD_SIZE aligned will we use a PMD mapping.
+	 */
+
+	start = ALIGN_DOWN(start, PAGE_SIZE);
 	for (addr = start; addr < end; addr = next) {
 		next = pmd_addr_end(addr, end);
 
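A standalone illustration of the new `ALIGN_DOWN` rounding (hypothetical address; assumes a 64K page size, a common ppc64 configuration, and a local reimplementation of the kernel macro):

```c
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 0x10000ULL /* 64K page size, assumed ppc64 config */
/* Same effect as the kernel's ALIGN_DOWN for power-of-two alignment. */
#define DEMO_ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	/* Made-up, unaligned vmemmap start address. */
	uint64_t start = 0xc00c000000012340ULL;

	/* Rounding down to the page boundary gives the start_pfn
	 * calculation and set_pte a properly aligned address. */
	printf("start   = 0x%016llx\n", (unsigned long long)start);
	printf("aligned = 0x%016llx\n",
	       (unsigned long long)DEMO_ALIGN_DOWN(start, DEMO_PAGE_SIZE));
	return 0;
}
```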
@@ -1145,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node,
 			 * in altmap block allocation failures, in which case
 			 * we fallback to RAM for vmemmap allocation.
 			 */
-			if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
-			    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
+			if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
+			    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 				/*
 				 * make sure we don't create altmap mappings
 				 * covering things outside the device.
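
The reordered test changes behavior when there is no altmap: a misaligned addr now always falls back to base (PTE) pages, rather than getting a PMD mapping that would cover memory outside the requested range. A hypothetical helper (not part of the patch) restating the new logic:

```c
/* Hypothetical restatement of the reordered condition, for clarity only. */
static bool want_base_pages(unsigned long addr, struct vmem_altmap *altmap)
{
	/* A 2M PMD mapping at an unaligned addr would map memory outside
	 * the requested range, so always fall back to PTEs here. */
	if (!IS_ALIGNED(addr, PMD_SIZE))
		return true;

	/* With an altmap, also avoid a PMD block that would cross the
	 * device boundary; allocation then falls back to RAM instead. */
	if (altmap && altmap_cross_boundary(altmap, addr, PMD_SIZE))
		return true;

	return false;
}
```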