
Commit b521c43

Christoph Hellwig authored and torvalds committed
mm: remove vmap_page_range_noflush and vunmap_page_range
These have non-static aliases, map_kernel_range_noflush and unmap_kernel_range_noflush, that differ only slightly in calling convention: they take addr + size instead of an end address.

Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Daniel Vetter <[email protected]>
Cc: David Airlie <[email protected]>
Cc: Gao Xiang <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Haiyang Zhang <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: "K. Y. Srinivasan" <[email protected]>
Cc: Laura Abbott <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Michael Kelley <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Nitin Gupta <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Sakari Ailus <[email protected]>
Cc: Stephen Hemminger <[email protected]>
Cc: Sumit Semwal <[email protected]>
Cc: Wei Liu <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Will Deacon <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 78a0e8c commit b521c43
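
The change itself is a mechanical calling-convention conversion: callers that passed a [start, end) range now pass a start address and a size. For illustration, the before/after pattern as it appears in the unmap_vmap_area hunk below:

	/* Before: internal helper taking a [start, end) range. */
	vunmap_page_range(va->va_start, va->va_end);

	/* After: non-static alias taking a start address and a size. */
	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);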

File tree

1 file changed, +40 -58 lines changed


mm/vmalloc.c

Lines changed: 40 additions & 58 deletions
@@ -128,10 +128,24 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
 	} while (p4d++, addr = next, addr != end);
 }
 
-static void vunmap_page_range(unsigned long addr, unsigned long end)
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify
+ * should have been allocated using get_vm_area() and its friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is responsible
+ * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
+ * function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
-	pgd_t *pgd;
+	unsigned long end = addr + size;
 	unsigned long next;
+	pgd_t *pgd;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
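
As the NOTE above says, cache and TLB flushing stay with the caller. A minimal caller sketch (variable names assumed) of the full sequence, mirroring what unmap_kernel_range() does in the last hunk of this diff:

	flush_cache_vunmap(addr, addr + size);      /* before unmapping */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);  /* after unmapping */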
@@ -220,18 +234,30 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
 	return 0;
 }
 
-/*
- * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
- * will have pfns corresponding to the "pages" array.
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
  *
- * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
+ * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size specify should
+ * have been allocated using get_vm_area() and its friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is responsible for
+ * calling flush_cache_vmap() on to-be-mapped areas before calling this
+ * function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
  */
-static int vmap_page_range_noflush(unsigned long start, unsigned long end,
-				   pgprot_t prot, struct page **pages)
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+			     pgprot_t prot, struct page **pages)
 {
-	pgd_t *pgd;
+	unsigned long end = addr + size;
 	unsigned long next;
-	unsigned long addr = start;
+	pgd_t *pgd;
 	int err = 0;
 	int nr = 0;
 
@@ -252,7 +278,7 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 {
 	int ret;
 
-	ret = vmap_page_range_noflush(start, end, prot, pages);
+	ret = map_kernel_range_noflush(start, end - start, prot, pages);
 	flush_cache_vmap(start, end);
 	return ret;
 }
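
vmap_page_range() above shows the in-tree pairing of mapping and cache flush. A hypothetical external caller (names assumed) would also honor the RETURNS note and check for a negative errno:

	int nr = map_kernel_range_noflush(addr, size, prot, pages);

	if (nr < 0)
		return nr;                    /* -errno on failure */
	flush_cache_vmap(addr, addr + size);  /* caller-side cache flush */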
@@ -1227,7 +1253,7 @@ EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
  */
 static void unmap_vmap_area(struct vmap_area *va)
 {
-	vunmap_page_range(va->va_start, va->va_end);
+	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
 }
 
 /*
@@ -1687,7 +1713,7 @@ static void vb_free(unsigned long addr, unsigned long size)
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
-	vunmap_page_range(addr, addr + size);
+	unmap_kernel_range_noflush(addr, size);
 
 	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range(addr, addr + size);
@@ -1985,50 +2011,6 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
-/**
- * map_kernel_range_noflush - map kernel VM area with the specified pages
- * @addr: start of the VM area to map
- * @size: size of the VM area to map
- * @prot: page protection flags to use
- * @pages: pages to map
- *
- * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
- * specify should have been allocated using get_vm_area() and its
- * friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing.  The caller is
- * responsible for calling flush_cache_vmap() on to-be-mapped areas
- * before calling this function.
- *
- * RETURNS:
- * The number of pages mapped on success, -errno on failure.
- */
-int map_kernel_range_noflush(unsigned long addr, unsigned long size,
-			     pgprot_t prot, struct page **pages)
-{
-	return vmap_page_range_noflush(addr, addr + size, prot, pages);
-}
-
-/**
- * unmap_kernel_range_noflush - unmap kernel VM area
- * @addr: start of the VM area to unmap
- * @size: size of the VM area to unmap
- *
- * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
- * specify should have been allocated using get_vm_area() and its
- * friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing.  The caller is
- * responsible for calling flush_cache_vunmap() on to-be-mapped areas
- * before calling this function and flush_tlb_kernel_range() after.
- */
-void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
-{
-	vunmap_page_range(addr, addr + size);
-}
-
 /**
  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
  * @addr: start of the VM area to unmap
@@ -2042,7 +2024,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 	unsigned long end = addr + size;
 
 	flush_cache_vunmap(addr, end);
-	vunmap_page_range(addr, end);
+	unmap_kernel_range_noflush(addr, size);
 	flush_tlb_kernel_range(addr, end);
 }
