@@ -173,62 +173,67 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pte)
 		goto next_pte;
 restart:
-	pgd = pgd_offset(mm, pvmw->address);
-	if (!pgd_present(*pgd))
-		return false;
-	p4d = p4d_offset(pgd, pvmw->address);
-	if (!p4d_present(*p4d))
-		return false;
-	pud = pud_offset(p4d, pvmw->address);
-	if (!pud_present(*pud))
-		return false;
-	pvmw->pmd = pmd_offset(pud, pvmw->address);
-	/*
-	 * Make sure the pmd value isn't cached in a register by the
-	 * compiler and used as a stale value after we've observed a
-	 * subsequent update.
-	 */
-	pmde = READ_ONCE(*pvmw->pmd);
-	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
-		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
-		pmde = *pvmw->pmd;
-		if (likely(pmd_trans_huge(pmde))) {
-			if (pvmw->flags & PVMW_MIGRATION)
-				return not_found(pvmw);
-			if (pmd_page(pmde) != page)
-				return not_found(pvmw);
-			return true;
-		}
-		if (!pmd_present(pmde)) {
-			swp_entry_t entry;
+	{
+		pgd = pgd_offset(mm, pvmw->address);
+		if (!pgd_present(*pgd))
+			return false;
+		p4d = p4d_offset(pgd, pvmw->address);
+		if (!p4d_present(*p4d))
+			return false;
+		pud = pud_offset(p4d, pvmw->address);
+		if (!pud_present(*pud))
+			return false;
 
-			if (!thp_migration_supported() ||
-			    !(pvmw->flags & PVMW_MIGRATION))
-				return not_found(pvmw);
-			entry = pmd_to_swp_entry(pmde);
-			if (!is_migration_entry(entry) ||
-			    migration_entry_to_page(entry) != page)
-				return not_found(pvmw);
-			return true;
-		}
-		/* THP pmd was split under us: handle on pte level */
-		spin_unlock(pvmw->ptl);
-		pvmw->ptl = NULL;
-	} else if (!pmd_present(pmde)) {
+		pvmw->pmd = pmd_offset(pud, pvmw->address);
 		/*
-		 * If PVMW_SYNC, take and drop THP pmd lock so that we
-		 * cannot return prematurely, while zap_huge_pmd() has
-		 * cleared *pmd but not decremented compound_mapcount().
+		 * Make sure the pmd value isn't cached in a register by the
+		 * compiler and used as a stale value after we've observed a
+		 * subsequent update.
 		 */
-		if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
-			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+		pmde = READ_ONCE(*pvmw->pmd);
 
-			spin_unlock(ptl);
+		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
+			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+			pmde = *pvmw->pmd;
+			if (likely(pmd_trans_huge(pmde))) {
+				if (pvmw->flags & PVMW_MIGRATION)
+					return not_found(pvmw);
+				if (pmd_page(pmde) != page)
+					return not_found(pvmw);
+				return true;
+			}
+			if (!pmd_present(pmde)) {
+				swp_entry_t entry;
+
+				if (!thp_migration_supported() ||
+				    !(pvmw->flags & PVMW_MIGRATION))
+					return not_found(pvmw);
+				entry = pmd_to_swp_entry(pmde);
+				if (!is_migration_entry(entry) ||
+				    migration_entry_to_page(entry) != page)
+					return not_found(pvmw);
+				return true;
+			}
+			/* THP pmd was split under us: handle on pte level */
+			spin_unlock(pvmw->ptl);
+			pvmw->ptl = NULL;
+		} else if (!pmd_present(pmde)) {
+			/*
+			 * If PVMW_SYNC, take and drop THP pmd lock so that we
+			 * cannot return prematurely, while zap_huge_pmd() has
+			 * cleared *pmd but not decremented compound_mapcount().
+			 */
+			if ((pvmw->flags & PVMW_SYNC) &&
+			    PageTransCompound(page)) {
+				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
+
+				spin_unlock(ptl);
+			}
+			return false;
 		}
-		return false;
+		if (!map_pte(pvmw))
+			goto next_pte;
 	}
-	if (!map_pte(pvmw))
-		goto next_pte;
 	while (1) {
 		unsigned long end;
 
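The comment carried over in this hunk states the key constraint on the walk: *pvmw->pmd can change under a concurrent update, so the entry is snapshotted once with READ_ONCE() and all later tests run against that snapshot, never against a value the compiler may have cached in a register or re-read. Below is a minimal userspace sketch of that pattern, not kernel code: the READ_ONCE() macro here is a simplified volatile-cast stand-in for the kernel's, and the names shared_pmd_val and walk_step are illustrative assumptions.

/* Minimal userspace sketch of the READ_ONCE() snapshot pattern. */
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): force a single,
 * non-cached load through a volatile-qualified pointer. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned long shared_pmd_val;	/* stands in for *pvmw->pmd */

static int walk_step(void)
{
	/*
	 * Snapshot the entry once; every later test uses this snapshot,
	 * so a concurrent change to shared_pmd_val cannot be observed
	 * halfway through the checks via a compiler-reissued load.
	 */
	unsigned long pmde = READ_ONCE(shared_pmd_val);

	if (!pmde)		/* "not present" in this toy model */
		return 0;
	return 1;		/* present: the real walk would lock and recheck */
}

int main(void)
{
	shared_pmd_val = 0x1;
	printf("mapped: %d\n", walk_step());
	return 0;
}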