@@ -2021,26 +2021,34 @@ EXPORT_SYMBOL(vm_iomap_memory);
 
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
-				     pte_fn_t fn, void *data)
+				     pte_fn_t fn, void *data, bool create)
 {
 	pte_t *pte;
-	int err;
+	int err = 0;
 	spinlock_t *uninitialized_var(ptl);
 
-	pte = (mm == &init_mm) ?
-		pte_alloc_kernel(pmd, addr) :
-		pte_alloc_map_lock(mm, pmd, addr, &ptl);
-	if (!pte)
-		return -ENOMEM;
+	if (create) {
+		pte = (mm == &init_mm) ?
+			pte_alloc_kernel(pmd, addr) :
+			pte_alloc_map_lock(mm, pmd, addr, &ptl);
+		if (!pte)
+			return -ENOMEM;
+	} else {
+		pte = (mm == &init_mm) ?
+			pte_offset_kernel(pmd, addr) :
+			pte_offset_map_lock(mm, pmd, addr, &ptl);
+	}
 
 	BUG_ON(pmd_huge(*pmd));
 
 	arch_enter_lazy_mmu_mode();
 
 	do {
-		err = fn(pte++, addr, data);
-		if (err)
-			break;
+		if (create || !pte_none(*pte)) {
+			err = fn(pte++, addr, data);
+			if (err)
+				break;
+		}
 	} while (addr += PAGE_SIZE, addr != end);
 
 	arch_leave_lazy_mmu_mode();
@@ -2052,93 +2060,137 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
				     unsigned long addr, unsigned long end,
-				     pte_fn_t fn, void *data)
+				     pte_fn_t fn, void *data, bool create)
 {
 	pmd_t *pmd;
 	unsigned long next;
-	int err;
+	int err = 0;
 
 	BUG_ON(pud_huge(*pud));
 
-	pmd = pmd_alloc(mm, pud, addr);
-	if (!pmd)
-		return -ENOMEM;
+	if (create) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (!pmd)
+			return -ENOMEM;
+	} else {
+		pmd = pmd_offset(pud, addr);
+	}
 	do {
 		next = pmd_addr_end(addr, end);
-		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
-		if (err)
-			break;
+		if (create || !pmd_none_or_clear_bad(pmd)) {
+			err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
+						 create);
+			if (err)
+				break;
+		}
 	} while (pmd++, addr = next, addr != end);
 	return err;
 }
 
 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
				     unsigned long addr, unsigned long end,
-				     pte_fn_t fn, void *data)
+				     pte_fn_t fn, void *data, bool create)
 {
 	pud_t *pud;
 	unsigned long next;
-	int err;
+	int err = 0;
 
-	pud = pud_alloc(mm, p4d, addr);
-	if (!pud)
-		return -ENOMEM;
+	if (create) {
+		pud = pud_alloc(mm, p4d, addr);
+		if (!pud)
+			return -ENOMEM;
+	} else {
+		pud = pud_offset(p4d, addr);
+	}
 	do {
 		next = pud_addr_end(addr, end);
-		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
-		if (err)
-			break;
+		if (create || !pud_none_or_clear_bad(pud)) {
+			err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
+						 create);
+			if (err)
+				break;
+		}
 	} while (pud++, addr = next, addr != end);
 	return err;
 }
 
 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
				     unsigned long addr, unsigned long end,
-				     pte_fn_t fn, void *data)
+				     pte_fn_t fn, void *data, bool create)
 {
 	p4d_t *p4d;
 	unsigned long next;
-	int err;
+	int err = 0;
 
-	p4d = p4d_alloc(mm, pgd, addr);
-	if (!p4d)
-		return -ENOMEM;
+	if (create) {
+		p4d = p4d_alloc(mm, pgd, addr);
+		if (!p4d)
+			return -ENOMEM;
+	} else {
+		p4d = p4d_offset(pgd, addr);
+	}
 	do {
 		next = p4d_addr_end(addr, end);
-		err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
-		if (err)
-			break;
+		if (create || !p4d_none_or_clear_bad(p4d)) {
+			err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
+						 create);
+			if (err)
+				break;
+		}
 	} while (p4d++, addr = next, addr != end);
 	return err;
 }
 
-/*
- * Scan a region of virtual memory, filling in page tables as necessary
- * and calling a provided function on each leaf page table.
- */
-int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
-			unsigned long size, pte_fn_t fn, void *data)
+static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+				 unsigned long size, pte_fn_t fn,
+				 void *data, bool create)
 {
 	pgd_t *pgd;
 	unsigned long next;
 	unsigned long end = addr + size;
-	int err;
+	int err = 0;
 
 	if (WARN_ON(addr >= end))
 		return -EINVAL;
 
 	pgd = pgd_offset(mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
+		if (!create && pgd_none_or_clear_bad(pgd))
+			continue;
+		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
 
 	return err;
 }
+
+/*
+ * Scan a region of virtual memory, filling in page tables as necessary
+ * and calling a provided function on each leaf page table.
+ */
+int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
+			unsigned long size, pte_fn_t fn, void *data)
+{
+	return __apply_to_page_range(mm, addr, size, fn, data, true);
+}
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
+/*
+ * Scan a region of virtual memory, calling a provided function on
+ * each leaf page table where it exists.
+ *
+ * Unlike apply_to_page_range, this does _not_ fill in page tables
+ * where they are absent.
+ */
+int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
+				 unsigned long size, pte_fn_t fn, void *data)
+{
+	return __apply_to_page_range(mm, addr, size, fn, data, false);
+}
+EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
+
 
 /*
  * handle_pte_fault chooses page fault handler according to an entry which was
  * read non-atomically. Before making any commitment, on those architectures
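For context, a minimal usage sketch of the new helper follows. It is not part of the patch: the callback name, the wrapper function, and the address range are hypothetical, but the signatures match the pte_fn_t callback and the apply_to_existing_page_range() prototype introduced above.

#include <linux/mm.h>

/* Invoked once per PTE; with apply_to_existing_page_range() it only runs
 * where the page tables are already populated. */
static int clear_pte_cb(pte_t *pte, unsigned long addr, void *data)
{
	pte_clear(&init_mm, addr, pte);
	return 0;
}

static void clear_existing_kernel_range(unsigned long start, unsigned long size)
{
	/* create == false: walk only what exists, never allocate page tables. */
	apply_to_existing_page_range(&init_mm, start, size, clear_pte_cb, NULL);
}

apply_to_page_range() keeps its old behaviour (create == true): it allocates any missing page-table levels before invoking the callback, so it remains the right call when the range may not be populated yet.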