@@ -156,7 +156,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pte)
 		goto next_pte;
 
-	if (unlikely(PageHuge(pvmw->page))) {
+	if (unlikely(PageHuge(page))) {
 		/* when pud is not present, pte will be NULL */
 		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
@@ -217,8 +217,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		 * cannot return prematurely, while zap_huge_pmd() has
 		 * cleared *pmd but not decremented compound_mapcount().
 		 */
-		if ((pvmw->flags & PVMW_SYNC) &&
-		    PageTransCompound(pvmw->page)) {
+		if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
 			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
 
 			spin_unlock(ptl);
@@ -234,9 +233,9 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			return true;
 next_pte:
 		/* Seek to next pte only makes sense for THP */
-		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+		if (!PageTransHuge(page) || PageHuge(page))
 			return not_found(pvmw);
-		end = vma_address_end(pvmw->page, pvmw->vma);
+		end = vma_address_end(page, pvmw->vma);
 		do {
 			pvmw->address += PAGE_SIZE;
 			if (pvmw->address >= end)
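
Note: the unchanged context in the first hunk already passes a bare page to page_size(), so page_vma_mapped_walk() evidently keeps a local copy of pvmw->page; these hunks simply switch the remaining pvmw->page uses over to that local. A minimal sketch of how that local is assumed to be set up near the top of the function (the declaration itself sits outside the hunks shown, so its exact placement is an assumption):

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct page *page = pvmw->page;		/* assumed local alias used by the hunks above */
	struct mm_struct *mm = pvmw->vma->vm_mm;

	/* ... rest of the walk, using "page" throughout ... */
}

Using the local keeps the PageHuge()/PageTransHuge()/PageTransCompound() tests consistent with the page_size(page) call already in the context, and avoids re-dereferencing pvmw->page at every check.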