@@ -74,7 +74,22 @@ static void vmem_pte_free(unsigned long *table)
 
 #define PAGE_UNUSED 0xFD
 
-static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+/*
+ * The unused vmemmap range, which was not yet memset(PAGE_UNUSED) ranges
+ * from unused_pmd_start to next PMD_SIZE boundary.
+ */
+static unsigned long unused_pmd_start;
+
+static void vmemmap_flush_unused_pmd(void)
+{
+	if (!unused_pmd_start)
+		return;
+	memset(__va(unused_pmd_start), PAGE_UNUSED,
+	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
+	unused_pmd_start = 0;
+}
+
+static void __vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
 {
 	/*
 	 * As we expect to add in the same granularity as we remove, it's
@@ -85,25 +100,49 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
 	memset(__va(start), 0, sizeof(struct page));
 }
 
+static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+{
+	/*
+	 * We only optimize if the new used range directly follows the
+	 * previously unused range (esp., when populating consecutive sections).
+	 */
+	if (unused_pmd_start == start) {
+		unused_pmd_start = end;
+		if (likely(IS_ALIGNED(unused_pmd_start, PMD_SIZE)))
+			unused_pmd_start = 0;
+		return;
+	}
+	vmemmap_flush_unused_pmd();
+	__vmemmap_use_sub_pmd(start, end);
+}
+
 static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
 	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
 
+	vmemmap_flush_unused_pmd();
+
 	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
-	vmemmap_use_sub_pmd(start, end);
+	__vmemmap_use_sub_pmd(start, end);
 
 	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
 	if (!IS_ALIGNED(start, PMD_SIZE))
 		memset(page, PAGE_UNUSED, start - __pa(page));
+	/*
+	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
+	 * consecutive sections. Remember for the last added PMD the last
+	 * unused range in the populated PMD.
+	 */
 	if (!IS_ALIGNED(end, PMD_SIZE))
-		memset(__va(end), PAGE_UNUSED, __pa(page) + PMD_SIZE - end);
+		unused_pmd_start = end;
 }
 
 /* Returns true if the PMD is completely unused and can be freed. */
 static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
 {
 	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
 
+	vmemmap_flush_unused_pmd();
 	memset(__va(start), PAGE_UNUSED, end - start);
 	return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
 }
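
For readers who want to experiment with the idea outside the kernel, below is a minimal userspace sketch of the same deferral pattern: instead of memset(PAGE_UNUSED)ing the unused tail of a partially populated block right away, remember where the unused range starts and only fill it once the next used range turns out not to be contiguous with it. This avoids writing and then immediately overwriting the same memmap page when consecutive sections are added. All names here (FAKE_PMD_SIZE, pending_unused_start, use_range, flush_pending_unused) are invented for illustration and are not part of the patch; it only models the tail deferral, not the full vmemmap handling.

/*
 * Illustrative userspace model only -- not kernel code. It mimics how the
 * patch defers the PAGE_UNUSED fill of a partially used block until it is
 * known that the following range is not contiguous with it.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define FAKE_PMD_SIZE	64	/* stand-in for PMD_SIZE */
#define PAGE_UNUSED	0xFD

static unsigned char backing[4 * FAKE_PMD_SIZE];
static size_t pending_unused_start;	/* 0 means "nothing deferred" */

static size_t align_up(size_t x, size_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Fill the remembered unused tail up to the next FAKE_PMD_SIZE boundary. */
static void flush_pending_unused(void)
{
	if (!pending_unused_start)
		return;
	memset(backing + pending_unused_start, PAGE_UNUSED,
	       align_up(pending_unused_start, FAKE_PMD_SIZE) - pending_unused_start);
	pending_unused_start = 0;
}

/* Mark [start, end) as used; defer marking the tail of its block unused. */
static void use_range(size_t start, size_t end)
{
	if (pending_unused_start && pending_unused_start == start) {
		/* Contiguous with the previous range: extend, no memset needed. */
		pending_unused_start = end % FAKE_PMD_SIZE ? end : 0;
	} else {
		flush_pending_unused();
		if (end % FAKE_PMD_SIZE)
			pending_unused_start = end;
	}
	memset(backing + start, 0, end - start);
}

int main(void)
{
	/* Two contiguous ranges: no PAGE_UNUSED fill happens in between. */
	use_range(16, 48);
	use_range(48, 96);
	/* A non-contiguous range forces the deferred fill of [96, 128). */
	use_range(160, 192);
	assert(backing[100] == PAGE_UNUSED);
	printf("deferred fill done: backing[100] = 0x%02x\n", backing[100]);
	return 0;
}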