@@ -128,10 +128,24 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
 	} while (p4d++, addr = next, addr != end);
 }
 
-static void vunmap_page_range(unsigned long addr, unsigned long end)
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size specify
+ * should have been allocated using get_vm_area() and its friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is responsible
+ * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
+ * function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
-	pgd_t *pgd;
+	unsigned long end = addr + size;
 	unsigned long next;
+	pgd_t *pgd;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
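
The flush contract spelled out in the new kerneldoc above is the one already followed by unmap_kernel_range() later in this file. A minimal caller-side sketch of that contract (the function name below is illustrative, not part of this patch):

/* Illustrative caller; not part of the patch. Mirrors unmap_kernel_range(). */
static void example_unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	/* The caller flushes caches before unmapping... */
	flush_cache_vunmap(addr, end);
	/* ...tears down the page tables without any flushing... */
	unmap_kernel_range_noflush(addr, size);
	/* ...and flushes the TLB afterwards. */
	flush_tlb_kernel_range(addr, end);
}
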
@@ -220,18 +234,30 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
 	return 0;
 }
 
-/*
- * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
- * will have pfns corresponding to the "pages" array.
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
  *
- * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
+ * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size specify should
+ * have been allocated using get_vm_area() and its friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is responsible for
+ * calling flush_cache_vmap() on to-be-mapped areas before calling this
+ * function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
  */
-static int vmap_page_range_noflush(unsigned long start, unsigned long end,
-				   pgprot_t prot, struct page **pages)
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+			     pgprot_t prot, struct page **pages)
 {
-	pgd_t *pgd;
+	unsigned long end = addr + size;
 	unsigned long next;
-	unsigned long addr = start;
+	pgd_t *pgd;
 	int err = 0;
 	int nr = 0;
 
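
A minimal caller sketch for the contract documented above (the wrapper name is illustrative; the body mirrors the vmap_page_range() conversion in the next hunk):

/* Illustrative caller; not part of the patch. Mirrors vmap_page_range(). */
static int example_map_kernel_range(unsigned long start, unsigned long end,
				    pgprot_t prot, struct page **pages)
{
	int ret;

	/* Returns the number of pages mapped, or -errno on failure. */
	ret = map_kernel_range_noflush(start, end - start, prot, pages);
	/* The caller is responsible for the cache flush. */
	flush_cache_vmap(start, end);
	return ret;
}
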
@@ -252,7 +278,7 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 {
 	int ret;
 
-	ret = vmap_page_range_noflush(start, end, prot, pages);
+	ret = map_kernel_range_noflush(start, end - start, prot, pages);
 	flush_cache_vmap(start, end);
 	return ret;
 }
@@ -1227,7 +1253,7 @@ EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
  */
 static void unmap_vmap_area(struct vmap_area *va)
 {
-	vunmap_page_range(va->va_start, va->va_end);
+	unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start);
 }
 
 /*
@@ -1687,7 +1713,7 @@ static void vb_free(unsigned long addr, unsigned long size)
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
-	vunmap_page_range(addr, addr + size);
+	unmap_kernel_range_noflush(addr, size);
 
 	if (debug_pagealloc_enabled_static())
 		flush_tlb_kernel_range(addr, addr + size);
@@ -1985,50 +2011,6 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
-/**
- * map_kernel_range_noflush - map kernel VM area with the specified pages
- * @addr: start of the VM area to map
- * @size: size of the VM area to map
- * @prot: page protection flags to use
- * @pages: pages to map
- *
- * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
- * specify should have been allocated using get_vm_area() and its
- * friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing. The caller is
- * responsible for calling flush_cache_vmap() on to-be-mapped areas
- * before calling this function.
- *
- * RETURNS:
- * The number of pages mapped on success, -errno on failure.
- */
-int map_kernel_range_noflush(unsigned long addr, unsigned long size,
-			     pgprot_t prot, struct page **pages)
-{
-	return vmap_page_range_noflush(addr, addr + size, prot, pages);
-}
-
-/**
- * unmap_kernel_range_noflush - unmap kernel VM area
- * @addr: start of the VM area to unmap
- * @size: size of the VM area to unmap
- *
- * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
- * specify should have been allocated using get_vm_area() and its
- * friends.
- *
- * NOTE:
- * This function does NOT do any cache flushing. The caller is
- * responsible for calling flush_cache_vunmap() on to-be-mapped areas
- * before calling this function and flush_tlb_kernel_range() after.
- */
-void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
-{
-	vunmap_page_range(addr, addr + size);
-}
-
 /**
  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
  * @addr: start of the VM area to unmap
@@ -2042,7 +2024,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 	unsigned long end = addr + size;
 
 	flush_cache_vunmap(addr, end);
-	vunmap_page_range(addr, end);
+	unmap_kernel_range_noflush(addr, size);
 	flush_tlb_kernel_range(addr, end);
 }
 