Commit 2c114df

davidhildenbrand authored and hca committed
s390/vmemmap: avoid memset(PAGE_UNUSED) when adding consecutive sections
Let's avoid memset(PAGE_UNUSED) when adding consecutive sections, whereby
the vmemmap of a single section does not span full PMDs.

Cc: Vasily Gorbik <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Gerald Schaefer <[email protected]>
Signed-off-by: David Hildenbrand <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Heiko Carstens <[email protected]>
1 parent cd5781d commit 2c114df

File tree

1 file changed (+42, -3 lines)


arch/s390/mm/vmem.c

Lines changed: 42 additions & 3 deletions
@@ -74,7 +74,22 @@ static void vmem_pte_free(unsigned long *table)
 
 #define PAGE_UNUSED 0xFD
 
-static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+/*
+ * The unused vmemmap range, which was not yet memset(PAGE_UNUSED) ranges
+ * from unused_pmd_start to next PMD_SIZE boundary.
+ */
+static unsigned long unused_pmd_start;
+
+static void vmemmap_flush_unused_pmd(void)
+{
+        if (!unused_pmd_start)
+                return;
+        memset(__va(unused_pmd_start), PAGE_UNUSED,
+               ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
+        unused_pmd_start = 0;
+}
+
+static void __vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
 {
         /*
          * As we expect to add in the same granularity as we remove, it's
@@ -85,25 +100,49 @@ static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
         memset(__va(start), 0, sizeof(struct page));
 }
 
+static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+{
+        /*
+         * We only optimize if the new used range directly follows the
+         * previously unused range (esp., when populating consecutive sections).
+         */
+        if (unused_pmd_start == start) {
+                unused_pmd_start = end;
+                if (likely(IS_ALIGNED(unused_pmd_start, PMD_SIZE)))
+                        unused_pmd_start = 0;
+                return;
+        }
+        vmemmap_flush_unused_pmd();
+        __vmemmap_use_sub_pmd(start, end);
+}
+
 static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
         void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
 
+        vmemmap_flush_unused_pmd();
+
         /* Could be our memmap page is filled with PAGE_UNUSED already ... */
-        vmemmap_use_sub_pmd(start, end);
+        __vmemmap_use_sub_pmd(start, end);
 
         /* Mark the unused parts of the new memmap page PAGE_UNUSED. */
         if (!IS_ALIGNED(start, PMD_SIZE))
                 memset(page, PAGE_UNUSED, start - __pa(page));
+        /*
+         * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
+         * consecutive sections. Remember for the last added PMD the last
+         * unused range in the populated PMD.
+         */
         if (!IS_ALIGNED(end, PMD_SIZE))
-                memset(__va(end), PAGE_UNUSED, __pa(page) + PMD_SIZE - end);
+                unused_pmd_start = end;
 }
 
 /* Returns true if the PMD is completely unused and can be freed. */
 static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
 {
         void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
 
+        vmemmap_flush_unused_pmd();
         memset(__va(start), PAGE_UNUSED, end - start);
         return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
 }
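
To see the bookkeeping in isolation, here is a minimal userspace sketch of the same lazy-marking idea. It is a simplified model, not the kernel code above: the byte array standing in for the vmemmap, TOY_PMD_SIZE, use_range() and nr_unused_memsets are all illustrative assumptions introduced for this example.

/*
 * Minimal userspace sketch of lazily deferring memset(PAGE_UNUSED).
 * TOY_PMD_SIZE, use_range() and the "vmemmap" byte array are toy
 * stand-ins, not kernel interfaces.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_UNUSED     0xFD
#define TOY_PMD_SIZE    64UL                    /* toy PMD size in bytes */
#define MEM_SIZE        (4 * TOY_PMD_SIZE)

#define ALIGN_UP(x, a)          (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

static unsigned char vmemmap[MEM_SIZE];
static unsigned long unused_pmd_start;          /* 0 == nothing deferred */
static unsigned long nr_unused_memsets;         /* deferred work performed */

/* Belatedly mark the remembered tail of the last touched PMD. */
static void flush_unused_pmd(void)
{
        if (!unused_pmd_start)
                return;
        memset(&vmemmap[unused_pmd_start], PAGE_UNUSED,
               ALIGN_UP(unused_pmd_start, TOY_PMD_SIZE) - unused_pmd_start);
        nr_unused_memsets++;
        unused_pmd_start = 0;
}

/* Mark [start, end) used; defer PAGE_UNUSED-marking of the tail. */
static void use_range(unsigned long start, unsigned long end)
{
        if (unused_pmd_start && unused_pmd_start == start) {
                /* Consecutive with the deferred tail: just extend it. */
                unused_pmd_start = IS_ALIGNED(end, TOY_PMD_SIZE) ? 0 : end;
        } else {
                flush_unused_pmd();
                if (!IS_ALIGNED(end, TOY_PMD_SIZE))
                        unused_pmd_start = end;
        }
        memset(&vmemmap[start], 0, end - start);        /* "populate" */
}

int main(void)
{
        /* Toy simplification: everything starts out PAGE_UNUSED. */
        memset(vmemmap, PAGE_UNUSED, MEM_SIZE);

        use_range(0, 48);       /* tail [48, 64) deferred, no memset yet */
        use_range(48, 112);     /* consecutive: still no PAGE_UNUSED memset */
        use_range(160, 200);    /* gap: flushes [112, 128) first */

        printf("deferred memsets actually performed: %lu\n",
               nr_unused_memsets);                      /* prints 1 */
        return 0;
}

With these toy parameters, the two consecutive use_range() calls perform no memset(PAGE_UNUSED) at all; only the third, non-consecutive call flushes the remembered tail. That is the saving vmemmap_use_sub_pmd() and vmemmap_flush_unused_pmd() provide above when consecutive sections are added, with vmemmap_unuse_sub_pmd() likewise flushing before it tests whether a PMD is entirely unused.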
